Schema (one record per repository):

Column              Type            Observed range / cardinality
repo_name           stringlengths   5 - 114
repo_url            stringlengths   24 - 133
snapshot_id         stringlengths   40 - 40
revision_id         stringlengths   40 - 40
directory_id        stringlengths   40 - 40
branch_name         stringclasses   209 values
visit_date          timestamp[ns]
revision_date       timestamp[ns]
committer_date      timestamp[ns]
github_id           int64           9.83k - 683M
star_events_count   int64           0 - 22.6k
fork_events_count   int64           0 - 4.15k
gha_license_id      stringclasses   17 values
gha_created_at      timestamp[ns]
gha_updated_at      timestamp[ns]
gha_pushed_at       timestamp[ns]
gha_language        stringclasses   115 values
files               listlengths     1 - 13.2k
num_files           int64           1 - 13.2k
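Each record in the rows below follows this schema; the `files` column is a list of per-file objects with keys such as `path`, `language`, and `text`. The following is a minimal sketch, assuming the dump is backed by a Parquet file (the path `data.parquet` is purely hypothetical), of how such records might be loaded and inspected; any loader that yields dicts with the same column names would work the same way.

```python
# Minimal sketch: load the dump and inspect its rows.
# Assumptions not stated in the dump: storage is Parquet and the file name
# "data.parquet" is hypothetical; adjust the loader to however the data is shipped.
import json
import pandas as pd

df = pd.read_parquet("data.parquet")

# Scalar columns (repo_name, star_events_count, ...) can be used directly.
popular = df[df["star_events_count"] > 100][["repo_name", "repo_url", "gha_language"]]
print(popular.head())

# "files" is a list of per-file records with keys like "path", "language",
# and "text"; if a loader hands it over as a JSON string, decode it first.
def iter_python_files(row):
    files = row["files"]
    if isinstance(files, str):
        files = json.loads(files)
    for f in files:
        if f.get("language") == "Python":
            yield f["path"], f["text"]

for _, row in df.head(3).iterrows():
    for path, text in iter_python_files(row):
        print(row["repo_name"], path, len(text), "chars")
```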
pamcastro/recipe-api-django
https://github.com/pamcastro/recipe-api-django
d18af67e66c06c8cffe1d1c7196d373e440902e9
c8a79ebce0e6d90e1b29a8349dd0adc494a3a248
82d1959d4e329cfd94ac8e847053849e7e5b3757
refs/heads/master
2020-08-04T02:46:04.358354
2019-10-04T23:46:45
2019-10-04T23:46:45
211,976,458
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8146341443061829, "alphanum_fraction": 0.8146341443061829, "avg_line_length": 21.66666603088379, "blob_id": "238cc62b3d6775165a0e6dd3f433f74e06f36746", "content_id": "bceb9dbab3b965965f650c1fc7f0c64326d20c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/core/admin.py", "repo_name": "pamcastro/recipe-api-django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import User\nfrom .models import Recipe\nfrom .models import Ingredient\n\n\nadmin.site.register(User)\nadmin.site.register(Ingredient)\nadmin.site.register(Recipe)\n\n" }, { "alpha_fraction": 0.7675507068634033, "alphanum_fraction": 0.7675507068634033, "avg_line_length": 16.351350784301758, "blob_id": "f985fe248c3e7f29cf7beec16b2dc419b390de79", "content_id": "e3a6f34759308ab3ef12115dfe3848513d0bebc0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 641, "license_type": "no_license", "max_line_length": 57, "num_lines": 37, "path": "/README.md", "repo_name": "pamcastro/recipe-api-django", "src_encoding": "UTF-8", "text": "# Recipe App API\nTo start the Django App, first clone the repo:\n\n## Getting started\nvirtualenv venv\nsource venv/bin/activate\npip install -r requirements.txt\n\n#### (Step-by-step)\n\n\n## Postgres \nsudo -u postgres psql\nalter user postgres with encrypted password 'secretpassword';\n\nOPEN A EXTERNAL TERMINAL : \n\nsudo -u postgres psql\nOR \npsql -U postgres\nin your terminal to get into postgres\n\nCREATE USER new_username;\nALTER USER new_username SUPERUSER CREATEDB;\n\n\n## REST API using DJANGO REST\n\n```\npython manage.py makemigrations\npython manage.py migrate\npython manage.py runserver\npython manage.py createsuperuser\npython manage.py runserver\n\n\n```" } ]
2
kim13-meet/MEET-YL1
https://github.com/kim13-meet/MEET-YL1
1960f1d15d4da5296799b12ac8b851e0940d491f
3e9297defefdd5256622f2e59eb7878db6a2ac03
7ed0e11d351a2867584edfdda2a6e4d7f8d2f28b
refs/heads/master
2020-05-17T16:28:25.959204
2014-02-27T15:11:30
2014-02-27T15:11:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5727699398994446, "alphanum_fraction": 0.5821596384048462, "avg_line_length": 16.83333396911621, "blob_id": "ddb46b820b936459417da0e4a315aa95300b437a", "content_id": "e173d776798c8aecc4b721f1f18c084b2120c821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 28, "num_lines": 12, "path": "/lab4/number.py", "repo_name": "kim13-meet/MEET-YL1", "src_encoding": "UTF-8", "text": "class Integer(object):\n\tdef __init__(self, number):\n\t\tself.number = number\n\n\tdef __Display__(self):\n\t\tprint self.number\n\nif __name__ == \"__main__\":\n\tx = Integer(8)\n\tx.__Display__()\n\ty = Integer(5)\n\tx.__Display__()" } ]
1
Gosha-iv/Wurzelimperium-Bot
https://github.com/Gosha-iv/Wurzelimperium-Bot
f9e1a6cf2e68cefa08a69038a65e9fc4dcf96760
e24e520718183eade80c25c2484fc073d885727c
b063d194c388344731eee98f62c2594a0598c00e
refs/heads/master
2023-06-02T14:05:32.341602
2021-06-20T11:26:36
2021-06-20T11:26:36
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7591361999511719, "alphanum_fraction": 0.7641196250915527, "avg_line_length": 21.148147583007812, "blob_id": "e8a07c0c8bde507b5881e61abf2d7b2eab97de43", "content_id": "0b728e8e8c7e73b0c375c17ee835e797f4aca316", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 607, "license_type": "no_license", "max_line_length": 72, "num_lines": 27, "path": "/example.py", "repo_name": "Gosha-iv/Wurzelimperium-Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport src.Main as main\n\n\n\"\"\"\nBeispieldatei zur Verwendung des Bots.\nAlle Stellen die angepasst werden müssen sind mit TODO gekennzeichnet.\n\"\"\"\n\n#TODO: Login Daten eintragen\nuser = ''\npw = ''\n\n#Login und Initialisierung des Bots\nwurzelBot = main.initWurzelBot()\nwurzelBot.launchBot(46, user, pw)\n\n#TODO: Aktionen definieren\n#Beispiel: Alles ernten, in allen Gärten Kürbis anbauen und alles gießen\nwurzelBot.harvestAllGarden()\nwurzelBot.growPlantsInGardens('Kürbis')\nwurzelBot.waterPlantsInAllGardens()\n\n#Deinitialisierung des Bots\nwurzelBot.exitBot()\n\n\n\n\n" }, { "alpha_fraction": 0.5424588322639465, "alphanum_fraction": 0.5462610721588135, "avg_line_length": 22.81818199157715, "blob_id": "fc68ce159165dfeefd6acc23c12be78c65f6373e", "content_id": "da9a1ed67df36d1b6bb018711d0b0572bec147b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 67, "num_lines": 33, "path": "/src/Lager.py", "repo_name": "Gosha-iv/Wurzelimperium-Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Storage():\n \n def __init__(self, httpConnection):\n self.__httpConn = httpConnection\n self.__products = {}\n\n\n def __resetNumbersInStock(self):\n for productID in self.__products.keys():\n self.__products[productID] = 0\n\n\n def initProductList(self, productList):\n \n for productID in productList:\n self.__products[str(productID)] = 0\n \n \n def updateNumberInStock(self):\n \"\"\"\n Führt ein Update des Lagerbestands für alle Produkte durch.\n \"\"\"\n \n self.__resetNumbersInStock()\n \n inventory = self.__httpConn.getInventory()\n \n for i in inventory:\n self.__products[i] = inventory[i]\n\n\n\n" }, { "alpha_fraction": 0.5994445085525513, "alphanum_fraction": 0.604154109954834, "avg_line_length": 33.932491302490234, "blob_id": "201bd59e4132239bf4a9f0a43c6e2fdc5e6e11ab", "content_id": "82e1be62f653004c101d2def665a69b653908e9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8310, "license_type": "no_license", "max_line_length": 134, "num_lines": 237, "path": "/src/WurzelBot.py", "repo_name": "Gosha-iv/Wurzelimperium-Bot", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nCreated on 21.03.2017\n\n@author: MrFlamez\n'''\n\nfrom src.Spieler import Spieler, Login\nfrom src.HTTPCommunication import HTTPConnection\nfrom src.Messenger import Messenger\nfrom src.Garten import Garden, AquaGarden\nfrom src.Lager import Storage\nfrom src.Marktplatz import Marketplace\nfrom src.Produktdaten import ProductData\nimport logging\n\n\nclass WurzelBot(object):\n \"\"\"\n Die Klasse WurzelBot übernimmt jegliche Koordination aller anstehenden Aufgaben.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n \n \"\"\"\n self.__logBot = logging.getLogger(\"bot\")\n self.__logBot.setLevel(logging.DEBUG)\n 
self.__HTTPConn = HTTPConnection()\n self.productData = ProductData(self.__HTTPConn)\n self.spieler = Spieler()\n self.messenger = Messenger(self.__HTTPConn)\n self.storage = Storage(self.__HTTPConn)\n self.garten = []\n self.wassergarten = None\n self.marktplatz = Marketplace(self.__HTTPConn)\n\n\n def __initGardens(self):\n \"\"\"\n Ermittelt die Anzahl der Gärten und initialisiert alle.\n \"\"\"\n try:\n tmpNumberOfGardens = self.__HTTPConn.getNumberOfGardens()\n self.spieler.numberOfGardens = tmpNumberOfGardens\n for i in range(1, tmpNumberOfGardens + 1):\n self.garten.append(Garden(self.__HTTPConn, i))\n \n if self.spieler.isAquaGardenAvailable() is True:\n self.wassergarten = AquaGarden(self.__HTTPConn)\n\n except:\n raise\n\n\n def __getAllFieldIDsFromFieldIDAndSizeAsString(self, fieldID, sx, sy):\n \"\"\"\n Rechnet anhand der fieldID und Größe der Pflanze (sx, sy) alle IDs aus und gibt diese als String zurück.\n \"\"\"\n if (sx == '1' and sy == '1'): return str(fieldID)\n if (sx == '2' and sy == '1'): return str(fieldID) + ',' + str(fieldID + 1)\n if (sx == '1' and sy == '2'): return str(fieldID) + ',' + str(fieldID + 17)\n if (sx == '2' and sy == '2'): return str(fieldID) + ',' + str(fieldID + 1) + ',' + str(fieldID + 17) + ',' + str(fieldID + 18)\n self.__logBot.debug('Error der plantSize --> sx: ' + sx + ' sy: ' + sy)\n\n\n def __getAllFieldIDsFromFieldIDAndSizeAsIntList(self, fieldID, sx, sy):\n \"\"\"\n Rechnet anhand der fieldID und Größe der Pflanze (sx, sy) alle IDs aus und gibt diese als Integer-Liste zurück.\n \"\"\"\n sFields = self.__getAllFieldIDsFromFieldIDAndSizeAsString(fieldID, sx, sy)\n listFields = sFields.split(',') #Stringarray\n \n for i in range(0, len(listFields)):\n listFields[i] = int(listFields[i])\n \n return listFields\n\n\n def launchBot(self, server, user, pw):\n \"\"\"\n Diese Methode startet und initialisiert den Wurzelbot. 
Dazu wird ein Login mit den\n übergebenen Logindaten durchgeführt und alles nötige initialisiert.\n \"\"\"\n self.__logBot.info('Starte Wurzelbot')\n loginDaten = Login(server=server, user=user, password=pw)\n\n try:\n self.__HTTPConn.logIn(loginDaten)\n except:\n self.__logBot.error('Problem beim Starten des Wurzelbots.')\n return\n\n try:\n self.spieler.setUserNameFromServer(self.__HTTPConn)\n except:\n self.__logBot.error('Username konnte nicht ermittelt werden.')\n\n\n try:\n self.spieler.setUserDataFromServer(self.__HTTPConn)\n except:\n self.__logBot.error('UserDaten konnten nicht aktualisiert werden')\n \n try:\n tmpHoneyFarmAvailability = self.__HTTPConn.isHoneyFarmAvailable(self.spieler.getLevelNr())\n except:\n self.__logBot.error('Verfügbarkeit der Imkerei konnte nicht ermittelt werden.')\n else:\n self.spieler.setHoneyFarmAvailability(tmpHoneyFarmAvailability)\n\n try:\n tmpAquaGardenAvailability = self.__HTTPConn.isAquaGardenAvailable(self.spieler.getLevelNr())\n except:\n self.__logBot.error('Verfügbarkeit des Wassergartens konnte nicht ermittelt werden.')\n else:\n self.spieler.setAquaGardenAvailability(tmpAquaGardenAvailability)\n\n try:\n self.__initGardens()\n except:\n self.__logBot.error('Anzahl der Gärten konnte nicht ermittelt werden.')\n \n self.spieler.accountLogin = loginDaten\n self.spieler.setUserID(self.__HTTPConn.getUserID())\n self.productData.initAllProducts()\n self.storage.initProductList(self.productData.getListOfAllProductIDs())\n self.storage.updateNumberInStock()\n\n\n def exitBot(self):\n \"\"\"\n Diese Methode beendet den Wurzelbot geordnet und setzt alles zurück.\n \"\"\"\n self.__logBot.info('Beende Wurzelbot')\n try:\n self.__HTTPConn.logOut()\n except:\n self.__logBot.error('Wurzelbot konnte nicht korrekt beendet werden.')\n else:\n self.__logBot.info('Logout erfolgreich.')\n\n\n def updateUserData(self):\n \"\"\"\n Ermittelt die Userdaten und setzt sie in der Spielerklasse.\n \"\"\"\n try:\n userData = self.__HTTPConn.readUserDataFromServer()\n except:\n self.__logBot.error('UserDaten konnten nicht aktualisiert werden')\n else:\n self.spieler.userData = userData\n\n\n def waterPlantsInAllGardens(self):\n \"\"\"\n Alle Gärten des Spielers werden komplett bewässert.\n \"\"\"\n for garden in self.garten:\n garden.waterPlants()\n \n if self.spieler.isAquaGardenAvailable():\n pass\n #self.waterPlantsInAquaGarden()\n\n\n def writeMessagesIfMailIsConfirmed(self, recipients, subject, body):\n \"\"\"\n Erstellt eine neue Nachricht, füllt diese aus und verschickt sie.\n recipients muss ein Array sein!.\n Eine Nachricht kann nur verschickt werden, wenn die E-Mail Adresse bestätigt ist.\n \"\"\"\n if (self.spieler.isEMailAdressConfirmed()):\n try:\n self.messenger.writeMessage(self.spieler.getUserName(), recipients, subject, body)\n except:\n self.__logBot.error('Konnte keine Nachricht verschicken.')\n else:\n pass\n\n \n def getEmptyFieldsOfGardens(self):\n \"\"\"\n Gibt alle leeren Felder aller normalen Gärten zurück.\n Kann dazu verwendet werden zu entscheiden, wie viele Pflanzen angebaut werden können.\n \"\"\"\n emptyFields = []\n try:\n for garden in self.garten:\n emptyFields.append(garden.getEmptyFields())\n except:\n self.__logBot.error('Konnte leere Felder von Garten ' + str(garden.getID()) + ' nicht ermitteln.')\n else:\n pass\n \n def harvestAllGarden(self):\n #TODO: Wassergarten ergänzen\n try:\n for garden in self.garten:\n garden.harvest()\n \n if self.spieler.isAquaGardenAvailable():\n pass#self.waterPlantsInAquaGarden()\n\n except:\n 
self.__logBot.error('Konnte nicht alle Gärten ernten.')\n else:\n pass\n\n\n def growPlantsInGardens(self, productName):\n \"\"\"\n Pflanzt so viele Pflanzen von einer Sorte wie möglich über alle Gärten hinweg an.\n \"\"\"\n product = self.productData.getProductByName(productName)\n if (product.isProductPlantable()):\n for garden in self.garten:\n garden.growPlant(product.getID(), product.getSX(), product.getSY())\n\n\n def test(self):\n #TODO: Für Testzwecke, kann später entfernt werden.\n #return self.__HTTPConn.getUsrList(1, 15000)\n \"\"\"\n tradeableProducts = self.marktplatz.getAllTradableProducts()\n for id in tradeableProducts:\n product = self.productData.getProductByID(id)\n print product.getName()\n gaps = self.marktplatz.findBigGapInProductOffers(product.getID(), product.getPriceNPC())\n if len(gaps) > 0:\n print gaps\n print ''\n \"\"\"\n #self.__HTTPConn.growPlantInAquaGarden(162, 9)\n self.wassergarten.waterPlants()\n\n\n" } ]
3
Treshch1/python_traning
https://github.com/Treshch1/python_traning
0ff28442ad559c7e3ed2dfcb5de0fc430ecb71cb
de796861b7227fab176d342b67cf47acbd2b166f
d0452eb707f82f892c236c7e70a15f561968cc05
refs/heads/master
2020-06-16T12:26:59.431595
2019-08-23T19:24:03
2019-08-23T19:24:03
195,573,687
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6782364249229431, "alphanum_fraction": 0.6876172423362732, "avg_line_length": 33.3870964050293, "blob_id": "20d35bcfe528504d71265c602135430dbc0e2f90", "content_id": "e919ca4b2130b2ffdf177d810b2469e8e99c0b24", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1066, "license_type": "permissive", "max_line_length": 113, "num_lines": 31, "path": "/test/test_group/test_edit_group.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from model.group import Group\nfrom random import randrange\nimport pytest\nimport random\nimport string\n\n\ndef random_string(prefix, maxlen):\n symbols = string.ascii_letters + string.digits + \" \"*10\n return prefix + \"\".join([random.choice(symbols) for i in range(random.randrange(maxlen))])\n\n\ntest_data = [\n Group(name=random_string(\"name\", 10), header=random_string(\"header\", 20), footer=random_string(\"footer\", 20))\n for i in range(2)\n]\n\n\[email protected](\"group\", test_data, ids=[repr(x) for x in test_data])\ndef test_edit_name(app, db, group):\n if len(db.get_group_list()) == 0:\n app.group.create(Group(name='new group name'))\n old_groups = db.get_group_list()\n editable_group = random.choice(old_groups)\n group.id = editable_group.id\n app.group.edit_group_by_id(group, group.id)\n new_groups = db.get_group_list()\n assert len(old_groups) == len(new_groups)\n old_groups.remove(editable_group)\n old_groups.append(group)\n assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)\n" }, { "alpha_fraction": 0.5900607705116272, "alphanum_fraction": 0.5926649570465088, "avg_line_length": 38.38461685180664, "blob_id": "81e72e5c1e62bcb44163546105648ccca1de5972", "content_id": "5b38a015af8ce82675cc48ddc4ef61cafc6ee8db", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9216, "license_type": "permissive", "max_line_length": 101, "num_lines": 234, "path": "/fixture/contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from model.contact import Contact\nimport re\n\n\nclass ContactHelper:\n\n def __init__(self, app):\n self.app = app\n\n def go_back_to_home_page(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"home page\").click()\n\n def go_to_home_page(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"home\").click()\n\n def create(self, contact):\n wd = self.app.wd\n self.open_create_contact_page()\n self.fill_contact_form(contact)\n wd.find_element_by_name(\"submit\").click()\n self.go_back_to_home_page()\n self.contact_cache = None\n\n def open_create_contact_page(self):\n wd = self.app.wd\n wd.find_element_by_link_text(\"add new\").click()\n\n def open_edit_contact_page_by_index(self, index):\n wd = self.app.wd\n self.go_to_home_page()\n wd.find_elements_by_xpath(\"//img[@title='Edit']\")[index].click()\n\n def open_edit_contact_page_by_id(self, id):\n wd = self.app.wd\n self.go_to_home_page()\n wd.find_element_by_xpath(f\"//input[@id='{id}']/../..//img[@title='Edit']\").click()\n\n def delete_first_contact(self):\n self.delete_contact_by_index(0)\n\n def delete_contact_by_index(self, index):\n wd = self.app.wd\n self.go_to_home_page()\n self.select_contact_by_index(index)\n wd.find_element_by_xpath(\"//input[@value='Delete']\").click()\n wd.switch_to.alert.accept()\n self.contact_cache = None\n\n def delete_contact_by_id(self, id):\n wd = self.app.wd\n self.go_to_home_page()\n 
self.select_contact_by_id(id)\n wd.find_element_by_xpath(\"//input[@value='Delete']\").click()\n wd.switch_to.alert.accept()\n self.contact_cache = None\n\n def select_contact_by_index(self, index):\n wd = self.app.wd\n wd.find_elements_by_name(\"selected[]\")[index].click()\n\n def select_contact_by_id(self, id):\n wd = self.app.wd\n wd.find_element_by_css_selector(f\"input[value='{id}']\").click()\n\n def edit_first_contact(self, contact):\n self.edit_contact_by_index(contact, 0)\n\n def edit_contact_by_index(self, contact, index):\n wd = self.app.wd\n self.go_to_home_page()\n self.open_edit_contact_page_by_index(index)\n self.fill_contact_form(contact)\n wd.find_element_by_name(\"update\").click()\n self.go_back_to_home_page()\n self.contact_cache = None\n\n def edit_contact_by_id(self, contact, id):\n wd = self.app.wd\n self.go_to_home_page()\n self.open_edit_contact_page_by_id(id)\n self.fill_contact_form(contact)\n wd.find_element_by_name(\"update\").click()\n self.go_back_to_home_page()\n self.contact_cache = None\n\n def fill_contact_form(self, contact):\n self.change_text_value(\"firstname\", contact.first_name)\n self.change_text_value(\"middlename\", contact.middle_name)\n self.change_text_value(\"lastname\", contact.last_name)\n self.change_text_value(\"nickname\", contact.nickname)\n self.change_text_value(\"title\", contact.title)\n self.change_text_value(\"company\", contact.company)\n self.change_text_value(\"address\", contact.address)\n self.change_text_value(\"home\", contact.home)\n self.change_text_value(\"mobile\", contact.mobile)\n self.change_text_value(\"work\", contact.work)\n self.change_text_value(\"fax\", contact.fax)\n self.change_text_value(\"email\", contact.email)\n self.change_text_value(\"email2\", contact.email_2)\n self.change_text_value(\"email3\", contact.email_3)\n self.change_text_value(\"homepage\", contact.homepage)\n self.fill_with_choice(\"bday\", contact.bday, 1)\n self.fill_with_choice(\"bmonth\", contact.bmonth, 1)\n self.change_text_value(\"byear\", contact.byear)\n self.fill_with_choice(\"aday\", contact.aday, 2)\n self.fill_with_choice(\"amonth\", contact.amonth, 2)\n self.change_text_value(\"ayear\", contact.ayear)\n self.change_text_value(\"address2\", contact.secondary_address)\n self.change_text_value(\"phone2\", contact.secondary_home)\n self.change_text_value(\"notes\", contact.notes)\n\n def fill_with_choice(self, field_name, choice, sequence_number=None):\n wd = self.app.wd\n if choice is not None:\n if field_name in [\"bmonth\", \"amonth\"]:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_xpath(f\"(//option[.='{choice}'])[{sequence_number}]\").click()\n else:\n wd.find_element_by_name(field_name).click()\n wd.find_element_by_xpath(f\"(//option[@value='{choice}'])[{sequence_number}]\").click()\n\n def change_text_value(self, field_name, text):\n wd = self.app.wd\n if text is not None:\n wd.find_element_by_name(field_name).clear()\n wd.find_element_by_name(field_name).send_keys(text)\n\n def count(self):\n wd = self.app.wd\n return len(wd.find_elements_by_name(\"selected[]\"))\n\n contact_cache = None\n\n def get_contact_list(self):\n if self.contact_cache is None:\n wd = self.app.wd\n self.go_to_home_page()\n self.contact_cache = []\n for element in wd.find_elements_by_name(\"entry\"):\n id = element.find_element_by_name(\"selected[]\").get_attribute(\"id\")\n last_name = element.find_elements_by_xpath(\".//td\")[1].text\n first_name = element.find_elements_by_xpath(\".//td\")[2].text\n all_phones= 
element.find_elements_by_xpath(\".//td\")[5].text\n self.contact_cache.append(Contact(id=id, first_name=first_name, last_name=last_name,\n all_phones_from_home_page=all_phones))\n return self.contact_cache\n\n def get_contact_by_id(self, id):\n wd = self.app.wd\n self.go_to_home_page()\n all_phones = wd.find_elements_by_xpath(f\"//*[@id='{id}']/../..//td\")[5].text\n return Contact(id=id, all_phones_from_home_page=all_phones)\n\n def get_contact_info_from_edit_page(self, index):\n wd = self.app.wd\n self.open_edit_contact_page_by_index(index)\n firstname = wd.find_element_by_name(\"firstname\").get_attribute(\"value\")\n lastname = wd.find_element_by_name(\"lastname\").get_attribute(\"value\")\n id = wd.find_element_by_name(\"id\").get_attribute(\"value\")\n homephone = wd.find_element_by_name(\"home\").get_attribute(\"value\")\n workphone = wd.find_element_by_name(\"work\").get_attribute(\"value\")\n mobilephone = wd.find_element_by_name(\"mobile\").get_attribute(\"value\")\n secondaryphone = wd.find_element_by_name(\"phone2\").get_attribute(\"value\")\n return Contact(first_name=firstname, last_name=lastname, id=id,\n home=homephone, work=workphone,\n mobile=mobilephone, secondary_home=secondaryphone)\n\n def open_contact_view_by_index(self, index):\n wd = self.app.wd\n self.go_to_home_page()\n details = wd.find_elements_by_xpath(\"//tr[@name='entry']//td[7]//img\")[index]\n details.click()\n\n def get_contact_from_view_page(self, index):\n wd = self.app.wd\n self.open_contact_view_by_index(index)\n text = wd.find_element_by_id(\"content\").text\n try:\n home = re.search(\"H: (.*)\", text).group(1)\n except AttributeError:\n home = None\n try:\n work = re.search(\"W: (.*)\", text).group(1)\n except AttributeError:\n work = None\n try:\n mobile = re.search(\"M: (.*)\", text).group(1)\n except AttributeError:\n mobile = None\n try:\n secondary_home = re.search(\"P: (.*)\", text).group(1)\n except AttributeError:\n secondary_home = None\n return Contact(home=home, work=work,\n mobile=mobile, secondary_home=secondary_home)\n\n def add_group_to_contact(self, contact, group):\n self.go_to_home_page()\n self.select_contact_by_id(contact.id)\n self.choose_group_to_add(group.id)\n self.click_add()\n\n def del_group_from_contact(self, contact, group):\n self.go_to_home_page()\n self.choose_filter_by_group(group.id)\n self.select_contact_by_id(contact.id)\n self.click_delete_from_group()\n self.go_to_home_page()\n self.choose_default_filter()\n\n def choose_group_to_add(self, id):\n wd = self.app.wd\n wd.find_element_by_name(\"to_group\").click()\n wd.find_elements_by_xpath(f\"//option[@value='{id}']\")[1].click()\n\n def choose_filter_by_group(self, id):\n wd = self.app.wd\n wd.find_element_by_name(\"group\").click()\n wd.find_elements_by_xpath(f\"//option[@value='{id}']\")[0].click()\n\n def choose_default_filter(self):\n wd = self.app.wd\n wd.find_element_by_name(\"group\").click()\n wd.find_element_by_xpath(\"//*[.='[all]']\").click()\n\n def click_add(self):\n wd = self.app.wd\n wd.find_element_by_name(\"add\").click()\n\n def click_delete_from_group(self):\n wd = self.app.wd\n wd.find_element_by_name(\"remove\").click()\n" }, { "alpha_fraction": 0.5875831246376038, "alphanum_fraction": 0.6042128801345825, "avg_line_length": 55.375, "blob_id": "c4d19db25362a12487196f96e150df676ccec115", "content_id": "5db14b529132bb8808b83d28d8109a889244eb0e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 902, "license_type": 
"permissive", "max_line_length": 108, "num_lines": 16, "path": "/test/test_contact/test_add_contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom model.contact import Contact\n\n\ndef test_add_contact(app, db):\n old_contacts = db.get_contact_list()\n contact = Contact(first_name=\"first name\", middle_name=\"middle name\", last_name=\"last name\",\n nickname=\"nickname\", title=\"title\", company=\"company\", address=\"address\",\n home=\"home\", mobile=\"mobile\", work=\"work\", fax=\"fax\", email=\"email\", email_2=\"email2\",\n email_3=\"email3\", homepage=\"homepage\", bday='3', bmonth=\"March\", byear=\"1997\",\n aday='3', amonth=\"April\", ayear=\"2016\", secondary_address=\"secondary address\",\n secondary_home=\"secondary_home\", notes=\"notes\")\n app.contact.create(contact)\n new_contacts = db.get_contact_list()\n old_contacts.append(contact)\n assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)\n" }, { "alpha_fraction": 0.6400591135025024, "alphanum_fraction": 0.6400591135025024, "avg_line_length": 41.28125, "blob_id": "bf756ae0db07cc216df8bb9ff20685b9f7f54aef", "content_id": "6ef02d55cb1758141a96376c4bd617895b1c80df", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1353, "license_type": "permissive", "max_line_length": 106, "num_lines": 32, "path": "/test/test_contact/test_phone.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "import re\nfrom random import randrange\n\n\ndef test_phones_on_home_page(app, db):\n contact_list = db.get_contact_list_phones()\n for contact in contact_list:\n contact_from_home_page = app.contact.get_contact_by_id(contact.id)\n assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact)\n\n\ndef test_phones_on_contact_view_page(app):\n contact_list = app.contact.get_contact_list()\n index = randrange(len(contact_list))\n contact_from_view_page = app.contact.get_contact_from_view_page(index)\n contact_from_edit_page = app.contact.get_contact_info_from_edit_page(index)\n assert contact_from_view_page.home == contact_from_edit_page.home\n assert contact_from_view_page.work == contact_from_edit_page.work\n assert contact_from_view_page.mobile == contact_from_edit_page.mobile\n assert contact_from_view_page.secondary_home == contact_from_edit_page.secondary_home\n\n\ndef clears(s):\n return re.sub(\"[() -]\", \"\", s)\n\n\ndef merge_phones_like_on_home_page(contact):\n return \"\\n\".join(filter(lambda x: x != \"\",\n map(lambda x: clears(x),\n filter(lambda x: x is not None,\n [contact.home, contact.mobile,\n contact.work, contact.secondary_home]))))\n" }, { "alpha_fraction": 0.5593471527099609, "alphanum_fraction": 0.5593471527099609, "avg_line_length": 26, "blob_id": "e5be4c49a11c4d489dc8775baa59dd9de86b139f", "content_id": "1b61846981a30bcc0f9152187a070f2ae49bec65", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "permissive", "max_line_length": 119, "num_lines": 25, "path": "/model/group.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from sys import maxsize\n\n\nclass Group:\n\n def __init__(self, name=None, header=None, footer=None, id=None):\n self.name = name\n self.header = header\n self.footer = footer\n self.id = id\n\n def __repr__(self):\n return f\"{self.id}, {self.name}, 
{self.header}, {self.footer}\"\n\n def __eq__(self, other):\n return (self.id is None or other.id is None or self.id == other.id) and self.name.strip() == other.name.strip()\n\n def id_or_max(self):\n if self.id:\n return int(self.id)\n else:\n return maxsize\n\n def clone(self):\n return Group(**{k: getattr(self, k) for k in (\"name\", \"header\")})" }, { "alpha_fraction": 0.6088379621505737, "alphanum_fraction": 0.6211129426956177, "avg_line_length": 52.130435943603516, "blob_id": "692484f8794fe67bb9f3bbb7b4f7fc92ded8a24c", "content_id": "3e622ba14bcc7a9968288c06bbfebf5888b1bd0d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1222, "license_type": "permissive", "max_line_length": 101, "num_lines": 23, "path": "/test/test_contact/test_edit_contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from model.contact import Contact\nimport random\n\n\ndef test_edit_first_contact(app, db):\n if len(db.get_contact_list())== 0:\n app.contact.create(Contact(first_name=\"first name\"))\n old_contacts = db.get_contact_list()\n editable_contact = random.choice(old_contacts)\n contact = Contact(first_name=\"first name\", middle_name=\"middle name\", last_name=\"last name\",\n nickname=\"nickname\", title=\"title\", company=\"company\", address=\"address\",\n home=\"home\", mobile=\"mobile\", work=\"work\", fax=\"fax\", email=\"email\",\n email_2=\"email2\", email_3=\"email3\", homepage=\"homepage\", bday='3',\n bmonth=\"March\", byear=\"1997\", aday='3', amonth=\"April\", ayear=\"2016\",\n secondary_address=\"secondary address\", secondary_home=\"secondary_home\",\n notes=\"notes\")\n contact.id = editable_contact.id\n app.contact.edit_contact_by_id(contact, contact.id)\n new_contacts = db.get_contact_list()\n assert len(old_contacts) == len(new_contacts)\n old_contacts.remove(editable_contact)\n old_contacts.append(contact)\n assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)\n" }, { "alpha_fraction": 0.6931818127632141, "alphanum_fraction": 0.6988636255264282, "avg_line_length": 34.20000076293945, "blob_id": "4a7dd1b901f52cfe9d3d496856ee9662ca8a81ba", "content_id": "10b38d294a76fc3a1f081f7e1083b4029379e559", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "permissive", "max_line_length": 101, "num_lines": 15, "path": "/test/test_contact/test_del_contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from model.contact import Contact\nimport random\nimport time\n\n\ndef test_delete_random_contact(app, db):\n if len(db.get_contact_list()) == 0:\n app.contact.create(Contact(first_name=\"first name\"))\n old_contacts = db.get_contact_list()\n contact = random.choice(old_contacts)\n app.contact.delete_contact_by_id(contact.id)\n time.sleep(0.1)\n old_contacts.remove(contact)\n new_contacts = db.get_contact_list()\n assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)\n" }, { "alpha_fraction": 0.5885542035102844, "alphanum_fraction": 0.5921686887741089, "avg_line_length": 34.319149017333984, "blob_id": "a695ce1e50de14d0cb3fdc140afe329484fd9a78", "content_id": "572cb4bf0d32c781dd968ff6bac4b1157a83c6e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1660, "license_type": "permissive", "max_line_length": 115, 
"num_lines": 47, "path": "/model/contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from sys import maxsize\n\n\nclass Contact:\n\n def __init__(self, amonth=None, aday=None, byear=None, bmonth=None, bday=None, homepage=None, email_3=None,\n email_2=None, email=None, fax=None, work=None, mobile=None, home=None, address=None, company=None,\n title=None, nickname=None, last_name=None, middle_name=None, first_name=None, ayear=None,\n secondary_address=None, notes=None, secondary_home=None, id=None, all_phones_from_home_page=None):\n self.amonth = amonth\n self.aday = aday\n self.byear = byear\n self.bmonth = bmonth\n self.bday = bday\n self.homepage = homepage\n self.email_3 = email_3\n self.email_2 = email_2\n self.email = email\n self.fax = fax\n self.work = work\n self.mobile = mobile\n self.home = home\n self.address = address\n self.company = company\n self.title = title\n self.nickname = nickname\n self.last_name = last_name\n self.middle_name = middle_name\n self.first_name = first_name\n self.ayear = ayear\n self.secondary_address = secondary_address\n self.notes = notes\n self.secondary_home = secondary_home\n self.id = id\n self.all_phones_from_home_page = all_phones_from_home_page\n\n def __repr__(self):\n return f\"{self.id}, {self.first_name}\"\n\n def __eq__(self, other):\n return (self.id is None or other.id is None or self.id == other.id) and self.first_name == other.first_name\n\n def id_or_max(self):\n if self.id:\n return int(self.id)\n else:\n return maxsize\n" }, { "alpha_fraction": 0.6694411635398865, "alphanum_fraction": 0.6741973757743835, "avg_line_length": 41.04999923706055, "blob_id": "b25a0f4a32ce84937374849b671352d0d7be00a4", "content_id": "586d4e4ed591db504e61a490362d14c733269635", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "permissive", "max_line_length": 67, "num_lines": 20, "path": "/test/test_groups_in_contacts/test_del_group_from_contact.py", "repo_name": "Treshch1/python_traning", "src_encoding": "UTF-8", "text": "from model.group import Group\nfrom model.contact import Contact\n\n\ndef test_del_group_from_contact(app, db, orm):\n if len(db.get_group_list()) == 0:\n app.group.create(Group(name='new group name'))\n if len(db.get_contact_list()) == 0:\n app.contact.create(Contact(first_name=\"first name\"))\n available_items = orm.get_available_contact_and_group_del()\n if not available_items:\n contact = db.get_contact_list()[0]\n group = db.get_group_list()[0]\n app.contact.add_group_to_contact(contact, group)\n available_items = orm.get_available_contact_and_group_del()\n contact = available_items[\"contact\"]\n group = available_items[\"group\"]\n assert orm.is_contact_in_group(contact, group)\n app.contact.del_group_from_contact(contact, group)\n assert not orm.is_contact_in_group(contact, group)\n" } ]
9
davidtso1219/playoff_bot
https://github.com/davidtso1219/playoff_bot
8b677e23ccf5cf82061ad1c5527feffe13db8db8
56ba5fc858d28c15086daa252a6952f9dbcd50cc
8107652b26173ebfebce8f73794898f01a41f29f
refs/heads/master
2022-12-09T08:28:19.175998
2020-09-08T00:38:57
2020-09-08T00:38:57
293,625,372
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7475247383117676, "alphanum_fraction": 0.7673267126083374, "avg_line_length": 66.33333587646484, "blob_id": "c8e86f39a248619976fddfbe881ea7c32d8f66b2", "content_id": "af23bdb9e5792a8b9cd0eb4351b589369092efb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 202, "license_type": "no_license", "max_line_length": 186, "num_lines": 3, "path": "/README.md", "repo_name": "davidtso1219/playoff_bot", "src_encoding": "UTF-8", "text": "# playoff_bot\n\nThis is a discord bot made by David Tso. You can use this bot to ask for some information about NBA 2020 playoffs. First type nba and the bot will tell you some information so check it out!\n" }, { "alpha_fraction": 0.4900341033935547, "alphanum_fraction": 0.5011671781539917, "avg_line_length": 38.78571319580078, "blob_id": "5684e607675e256cafc367ae1566938fe55d439b", "content_id": "3fb0461b0433a8ec6cbb254869736030f0a17069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5569, "license_type": "no_license", "max_line_length": 220, "num_lines": 140, "path": "/hackathon/mybot.py", "repo_name": "davidtso1219/playoff_bot", "src_encoding": "UTF-8", "text": "import discord\nfrom random import randrange\nimport requests\nimport datetime\nimport json\n\nwith open('/Users/petertso/Desktop/_Foothill/CS/club/hackathon/series_data.json') as f:\n series_data = json.load(f)\n\nclient = discord.Client()\n\n# url = 'https://www.balldontlie.io/api/v1/games?seasons=2019&start_date=2020-08-18&page='\n# response1 = requests.get(url + '1')\n# meta = response1.json()['meta']\n# pages = meta['total_pages']\n# data = response1.json()['data']\n\n# for i in range(2, pages + 1):\n# data += requests.get(url + str(i)).json()['data']\n\n# Event handlers\[email protected]\nasync def on_ready():\n print('Hey this is {0.user} '.format(client))\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n\n if message.content == 'nba':\n await message.channel.send('Hey I am the bot helping you get the data of NBA playoffs 2020')\n await message.channel.send(\"You can ask me with the following commands: \\n\\t*nba report* \\n\\t*nba who wins? 
(team) and (team)*\")\n\n if message.content.startswith('nba who wins?'):\n try:\n mylist = message.content.split()[3:]\n team1 = mylist[0]\n team2 = mylist[2]\n\n for series in series_data['data']:\n if (team1.lower() in series['higher_seed'].lower() and team2.lower() in series['lower_seed'].lower()) or (team2.lower() in series['higher_seed'].lower() and team1.lower() in series['lower_seed'].lower()):\n result = series['result']\n text = f\"The result is **{series['result']}**\"\n if '4' in result:\n if result[0] == '4':\n text += f\"\\n**{series['higher_seed']}** wins the series!!!\"\n else:\n text += f\"\\n**{series['lower_seed']}** wins the series!!!\"\n else:\n if int(result[0]) > int(result[4]):\n text += f\"\\n**{series['higher_seed']}** is leading the series\"\n elif int(result[0]) < int(result[4]):\n text += f\"\\n**{series['lower_seed']}** is leading the series\"\n else:\n text += \"\\n**Series Tied**\"\n text += \"\\n*The series hasn't ended..*\"\n\n break\n\n else:\n await message.channel.send('There is no matching series..')\n return\n\n await message.channel.send(text)\n except IndexError:\n await message.channel.send('*The format is wrong..*')\n \n if message.content == 'nba report':\n\n # def report(today=datetime.date.today()):\n # today_url = f'https://www.balldontlie.io/api/v1/games?seasons=2019&start_date={today}&end_date={today}'\n # today_response = requests.get(today_url)\n # data = today_response.json()['data']\n\n # num = 1\n\n # for game in data:\n # if game['period']:\n # text = f\"\\nGame {num}\\n\"\n \n # home = game['home_team']\n # visitor = game['visitor_team']\n # text += f\"{home['full_name']} vs. {visitor['full_name']}\\n\"\n # text += f\"\\tHome Score: {game['home_team_score']}\\n\"\n # text += f\"\\tVisitor Score: {game['visitor_team_score']}\\n\"\n\n # num += 1\n \n # await message.channel.send(text)\n # else:\n # report(today - datetime.timedelta(days=1))\n\n # report()\n today = datetime.date.today()\n today_url = f'https://www.balldontlie.io/api/v1/games?seasons=2019&start_date={today}&end_date={today}'\n today_response = requests.get(today_url)\n data = today_response.json()['data']\n\n num = 1\n\n for game in data:\n if game['period']:\n text = f\"------------------\\n*Game {num}*\\n\"\n \n home = game['home_team']\n visitor = game['visitor_team']\n text += f\"**{home['full_name']}** vs. **{visitor['full_name']}\\n**\"\n text += f\"\\tHome Score: **{game['home_team_score']}**\\n\"\n text += f\"\\tVisitor Score: **{game['visitor_team_score']}**\\n\"\n\n num += 1\n \n await message.channel.send(text)\n today_has_game = True \n\n if not today_has_game:\n await message.channel.send(\"There are no games to report today..\")\n await message.channel.send(\"Let me show you yesterday's games\")\n\n yesterday = today - datetime.timedelta(days=1)\n yesterday_url = f'https://www.balldontlie.io/api/v1/games?seasons=2019&start_date={yesterday}&end_date={yesterday}'\n yesterday_response = requests.get(yesterday_url)\n data = yesterday_response.json()['data']\n\n for game in data:\n if game['period']:\n text = f\"\\nGame {num}\\n\"\n \n home = game['home_team']\n visitor = game['visitor_team']\n text += f\"{home['full_name']} vs. 
{visitor['full_name']}\\n\"\n text += f\"\\tHome Score: {game['home_team_score']}\\n\"\n text += f\"\\tVisitor Score: {game['visitor_team_score']}\\n\"\n\n num += 1\n \n await message.channel.send(text)\n\nclient.run('NzUwMDc2Nzg2MDI4NzA3ODYx.X01RcQ.K_XXRWpsQG-85ngcZFVtZf2Ht6o')" }, { "alpha_fraction": 0.7020202279090881, "alphanum_fraction": 0.7020202279090881, "avg_line_length": 21.11111068725586, "blob_id": "83efb327a884a904a58c5ea478fdfefb15dc23e3", "content_id": "2404190afff260d1da60b5ef292d76ddadcad7d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 87, "num_lines": 9, "path": "/hackathon/test.py", "repo_name": "davidtso1219/playoff_bot", "src_encoding": "UTF-8", "text": "import json\n\nwith open('/Users/petertso/Desktop/_Foothill/CS/club/hackathon/series_data.json') as f:\n data = json.load(f)\n\ndata = data['data']\n\nfor series in data:\n print(series['conference'])" } ]
3
zzhacked/securityMonitor
https://github.com/zzhacked/securityMonitor
db32d26d0c809a51d74d2cdd44a1b0ee5a7eb847
6eab03981c0479062e44042a5bd83c8f44bc454a
19e6cb61985d69f0771af8a17ed60b3eae3924b5
refs/heads/master
2020-06-17T00:59:53.937124
2019-04-01T17:00:47
2019-04-01T17:00:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6109510064125061, "alphanum_fraction": 0.6253602504730225, "avg_line_length": 20.75, "blob_id": "0c058a203c5af6ee25d0d677e98ef2fbfb1e5536", "content_id": "2b8146e837a0cfacbe70d23572b00b9a4ec17019", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 43, "num_lines": 16, "path": "/connect_db.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "from pymongo import MongoClient\n\ndef enqury_data():\n\twordlist = []\n\tDBNAME = '' #数据库名\n\tDBUSERNAME = '' #数据库用户\n\tDBPASSWORD = '' #数据库密码\n\tDB = '' #数据库IP\n\tPORT = 27017\n\tdb_conn = MongoClient(DB, PORT)\n\tna_db = getattr(db_conn, DBNAME)\n\tna_db.authenticate(DBUSERNAME, DBPASSWORD)\n\treturn na_db\n\nif __name__ == '__main__':\n\tenqury_data()" }, { "alpha_fraction": 0.5861738920211792, "alphanum_fraction": 0.6635388731956482, "avg_line_length": 36.56834411621094, "blob_id": "b16b89468f928b9ceaa2496b3b1ff6c3979c5860", "content_id": "cde70e9b931af84f2b53d31b9a23e46ec7eaea95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5322, "license_type": "no_license", "max_line_length": 196, "num_lines": 139, "path": "/spider/get_wechat_info.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup as bs\nfrom lxml import etree\nimport time\nfrom PIL import Image\nfrom fateadm_api import FateadmApi\nimport datetime\nfrom selenium import webdriver\nfrom pymongo import MongoClient\nimport random\nimport re\n\nuser_agent = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)'\n 'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',\n 'Opera/8.0 (Windows NT 5.1; U; en)',\n 'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50'\n]\n\ndef cookie_init():\n\tretries = 1\n\twhile retries < 3:\n\t\tcookie = {}\n\t\theaders = {'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) HeadlessChrome/65.0.3325.181 Safari/537.36'}\n\t\tchrome_options = webdriver.ChromeOptions()\n\t\tchrome_options.add_argument('--headless')\n\t\tchrome_options.add_argument('--disable-gpu')\n\t\tchrome_options.add_argument('--no-sandbox')\n\t\tchrome_options.add_argument('--disable-dev-shm-usage')\n\t\tclient = webdriver.Chrome(options=chrome_options)\n\t\tclient.get(\"https://weixin.sogou.com/antispider/?from=%2fweixin%3Ftype%3d2%26query%3d360CERT\")\n\t\tpath = './1.png'\n\t\timgpath = './yzm.png'\n\t\tclient.get_screenshot_as_file(path)\n\t\tim = Image.open(path)\n\t\tbox = (705, 598, 900, 680) # 设置要裁剪的区\n\t\tregion = im.crop(box)\n\t\tregion.save(imgpath)\n\t\tcapt = 
client.find_element_by_xpath('//*[@id=\"seccodeInput\"]')\n\t\ttest = FateadmApi('','','','') #打码平台接口\n\t\tcode = test.PredictFromFile('30600','./yzm.png') #打码平台识别\n\t\t#code = '123456'\n\t\tprint(code)\n\t\tcapt.send_keys(code)\n\t\ttime.sleep(1)\n\t\tclient.find_element_by_xpath('//*[@id=\"submit\"]').click()\n\t\ttime.sleep(2)\n\t\t#print(new_html)\n\t\tfor item in client.get_cookies():\n\t\t cookie[item[\"name\"]] = item[\"value\"]\n\t\ttry:\n\t\t\tprint(cookie['SNUID'])\n\t\texcept Exception:\n\t\t\tprint (\"解锁失败。重试次数:{0:d}\".format(3-retries))\n\t\t\tretries += 1\n\t\t\tcontinue\n\t\ttime.sleep(5)\n\t\treturn cookie['SNUID']\n\ndef get_info(url,table,a,tb_msg,headers):\n\tr = requests.get(url=url,headers=headers)\n\tr.encoding='utf-8'\n\t#print(url)\n\t#print(r.text)\n\ttry:\n\t\tsoup = bs(r.text,'html.parser')\n\t\tcontent = soup.find('a',{'uigs':'account_article_0'}).text\n\t\tsend_time = soup.find_all('span')[-1].find('script').text\n\t\tre_time = re.findall(r\"timeConvert\\('(.*?)'\\)\",send_time)[0]\n\t\t#print(re_time)\n\t\t#print(send_time)\n\t\t#print(content)\n\t\tnow_time = int(time.time())\n\t\tprint_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\t\ttimeArray = time.localtime(int(re_time))\n\t\totherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n\t\tava_time = now_time - int(re_time)\n\t\t#print(ava_time)\n\t\tmsg = content + ' from ' + table[a]\n\t\tdemo = {'time':otherStyleTime,'from':table[a],'content':content,'link':url}\n\t\ttb_msg.append(demo)\n\t\tDBNAME = ''\n\t\tDBUSERNAME = ''\n\t\tDB = '' #数据库地址\n\t\tPORT = 27017\n\t\tdb_conn = MongoClient(DB, PORT)\n\t\tna_db = getattr(db_conn, DBNAME)\n\t\tna_db.authenticate(DBUSERNAME, DBPASSWORD)\n\t\tc = na_db.wechatdatas\n\t\tc.update_one({\"content\": demo['content']}, {'$set': demo}, True)\n\t\t#print(tb_msg)\n\t\twith open('wechat_log.txt','a+') as f:\n\t\t\tf.write(print_time+'\\n')\n\texcept Exception as e:\n\t\t#print(e)\n\t\tpass\n\t\t\n\t\n\t\n\ndef wechat_info():\n\ttable = ['360CERT','长亭安全课堂','千里目实验室','云鼎实验室','ADLab']\n\ttb_msg = []\n\ta = -1\n\twith open('./snuid.txt') as f:\n\t\tsnuid = f.readline().strip()\n\t\theaders = {\n\t\t'Referer': 'http://weixin.sogou.com/weixin?type=1&query=python&ie=utf8&s_from=input&_sug_=n&_sug_type_=1&w=01015002&oq=&ri=5&sourceid=sugg&sut=0&sst0=1540733222633&lkt=0%2C0%2C0&p=40040108',\n\t\t'User-Agent': random.choice(user_agent),\n\t\t'Cookie': 'SUV=00D80B85458CAE4B5B299A407EA3A580;SNUID=' + snuid,\n\t\t}\n\t\trr = requests.get(url='https://weixin.sogou.com/weixin?type=1&s_from=input&query=360CERT&ie=utf8&_sug_=n&_sug_type_=',headers=headers)\n\tif len(rr.text) > 6000:\n\t\tpass\n\telse:\n\t\tuid = cookie_init()\n\t\theaders = {\n\t\t'Referer': 'http://weixin.sogou.com/weixin?type=1&query=python&ie=utf8&s_from=input&_sug_=n&_sug_type_=1&w=01015002&oq=&ri=5&sourceid=sugg&sut=0&sst0=1540733222633&lkt=0%2C0%2C0&p=40040108',\n\t\t'User-Agent': random.choice(user_agent),\n\t\t'Cookie': 'SUV=00D80B85458CAE4B5B299A407EA3A580;SNUID=' + uid,\n\t\t}\n\t\tff = open('./snuid.txt','w+')\n\t\tff.write(uid)\n\t\tff.close()\n\tfor i in table:\n\t\ta+=1\n\t\turl = 'https://weixin.sogou.com/weixin?type=1&s_from=input&query=' + i + '&ie=utf8&_sug_=n&_sug_type_='\n\t\tget_info(url,table,a,tb_msg,headers)\n\tprint(tb_msg)\n\treturn tb_msg\n\t\t\n\nif __name__ == '__main__':\n\twechat_info()\n" }, { "alpha_fraction": 0.5291750431060791, "alphanum_fraction": 0.536720335483551, "avg_line_length": 31.52458953857422, "blob_id": 
"588e4d24119a54b3a2c94fb46bcf311d9f2569e3", "content_id": "7075a56fad21cbe44524eef94834e020874372c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2034, "license_type": "no_license", "max_line_length": 95, "num_lines": 61, "path": "/spider/get_cve_info.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "import re\nimport time\nimport requests\nimport os\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup as bs\n\n\nnowtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\n\ndef get_CVE_urls():\n urls = []\n res = requests.get('https://cassandra.cerias.purdue.edu/CVE_changes/today.html')\n #print(res.text)\n targets = re.findall(r\"New entries:(.*?)Graduations\",res.text,re.S|re.M)\n for target in targets:\n soup = bs(target,'html.parser')\n tags = soup.find_all('a')\n #print(urls)\n for i in tags:\n url = i['href']\n urls.append(url)\n return urls\n\ndef CVE_info(): \n urls = get_CVE_urls()\n select_msg = ''\n wordlist = []\n keywords = []\n if(len(urls)==0):\n msg = nowtime + '<p>今日CVE_today风和日丽,无大事发生!!!</p>'\n return msg\n else:\n msg_header = '<p>今日CVE_today一共<font size=\"3\" color=\"red\">' + str(len(urls))+'</font>个。'\n for url in urls:\n res = requests.get(url, timeout=60)\n soup = bs(res.text, 'html.parser')\n cveId = soup.find(nowrap='nowrap').find('h2').string\n table = soup.find(id='GeneratedTable').find('table')\n company = table.find_all('tr')[8].find('td').string\n createdate = table.find_all('tr')[10].find('td').string\n content = table.find_all('tr')[3].find('td').text\n data = {'time':nowtime,'from':'CVE-Today-'+cveId,'content':content,'link':url}\n DBNAME = ''\n DBUSERNAME = ''\n DBPASSWORD = ''\n DB = '' #数据库地址\n PORT = 27017\n db_conn = MongoClient(DB, PORT)\n na_db = getattr(db_conn, DBNAME)\n na_db.authenticate(DBUSERNAME, DBPASSWORD)\n c = na_db.cvedatas\n c.update_one({\"content\": data['content']}, {'$set': data}, True)\n wordlist.append(data)\n return wordlist\n \n \n\nif __name__ == '__main__':\n CVE_info()\n " }, { "alpha_fraction": 0.6335561275482178, "alphanum_fraction": 0.6379349827766418, "avg_line_length": 35.47058868408203, "blob_id": "d9cde6173bc6b6d3ee58d25073882b56f15c6f91", "content_id": "6bb30084dd4a43eaad906eb20cca2b096b2c8e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4339, "license_type": "no_license", "max_line_length": 110, "num_lines": 119, "path": "/flask_demo.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "import re\nimport time\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom connect_db import enqury_data\n\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n na_db = enqury_data()\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n data_len = {'cve_len':na_db.cvedatas.count(),\n 'wechat_len':na_db.wechatdatas.count(),\n 'deepweb_len':na_db.deepwebdatas.count(),\n 'anquanke_len':na_db.anquankedatas.count(),\n 'seebug_len':na_db.seebugdatas.count(),\n }\n today_len = {\n 'cve_today':na_db.cvedatas.find({'time':re.compile(now_time)}).count(),\n 'wechat_today':na_db.wechatdatas.find({'time':re.compile(now_time)}).count(),\n 'deepweb_today':na_db.deepwebdatas.find({'time':re.compile(now_time)}).count(),\n 'anquanke_today':na_db.anquankedatas.find({'time':re.compile(now_time)}).count(),\n 
'seebug_today':na_db.seebugdatas.find({'time':re.compile(now_time)}).count(),\n }\n return render_template('home.html',data_len=data_len,today_len=today_len)\n\[email protected]('/wechat', methods=['GET'])\ndef wechat():\n ress = []\n all_ress = []\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n na_db = enqury_data()\n collections = na_db.wechatdatas\n for res in collections.find().sort('time',-1):\n ress.append(res)\n for i in collections.find().sort('time',-1):\n all_ress.append(i)\n return render_template('wechat.html',ress=ress,all_ress=all_ress)\n\[email protected]('/deepweb', methods=['GET'])\ndef deepweb():\n deepdatas = []\n all_deepdatas = []\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n na_db = enqury_data()\n collections = na_db.deepwebdatas\n for res in collections.find({'time':re.compile(now_time)}).sort('time',-1):\n deepdatas.append(res)\n for i in collections.find().sort('time',-1):\n all_deepdatas.append(i)\n return render_template('deepweb.html',deepdatas=deepdatas,all_deepdatas=all_deepdatas)\n\[email protected]('/spider', methods=['GET'])\ndef spider():\n datas = []\n all_datas = []\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n na_db = enqury_data()\n #print(now_time)\n collections = na_db.cvedatas\n for res in collections.find({'time':re.compile(now_time)}).sort('time',-1):\n datas.append(res)\n for i in collections.find().sort('time',-1):\n all_datas.append(i)\n return render_template('spider.html',datas=datas,all_datas=all_datas)\n\n\[email protected]('/anquanke', methods=['GET'])\ndef anquanke():\n responses = []\n all_responses = []\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n na_db = enqury_data()\n collections = na_db.anquankedatas\n for res in collections.find({'time':re.compile(now_time)}).sort('time',-1):\n responses.append(res)\n for i in collections.find().sort('time',-1):\n all_responses.append(i)\n return render_template('anquanke.html',responses=responses,all_responses=all_responses)\n\[email protected]('/seebug', methods=['GET'])\ndef seebug():\n seebugs = []\n all_seebugs = []\n now_time = time.strftime('%Y-%m-%d', time.localtime(time.time()))\n na_db = enqury_data()\n collections = na_db.seebugdatas\n for res in collections.find({'time':re.compile(now_time)}).sort('time',-1):\n seebugs.append(res)\n for i in collections.find().sort('time',-1):\n all_seebugs.append(i)\n return render_template('seebug.html',seebugs=seebugs,all_seebugs=all_seebugs)\n\[email protected]('/search', methods=['GET','POST'])\ndef search():\n if request.method == 'POST':\n wordlists = []\n content = request.form['content']\n na_db = enqury_data()\n collections = [na_db.cvedatas,na_db.wechatdatas,na_db.deepdatas,na_db.anquankedatas,na_db.seebugdatas]\n for collection in collections:\n for res in collection.find({'content':re.compile(content,re.IGNORECASE)}).sort('time',-1):\n wordlists.append(res)\n #print(wordlists)\n return render_template('result.html',wordlists=wordlists)\n else:\n return render_template('search.html')\n\[email protected]('/dashboard', methods=['GET'])\ndef dashboard():\n return render_template('dashboard.html')\n \n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=5000,debug=True)" }, { "alpha_fraction": 0.8144690990447998, "alphanum_fraction": 0.8518086075782776, "avg_line_length": 24.939393997192383, "blob_id": "2cd4df8ba2b6d33cb80f43678fc898b3ecf8994b", "content_id": "b1330c22b74398fa9445986b2cbec8c16c536793", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Markdown", "length_bytes": 1777, "license_type": "no_license", "max_line_length": 177, "num_lines": 33, "path": "/README.md", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "# securityMonitor\n\n### 环境\n\n基于Python3.6+,由于其中Seebug爬虫调用Pyppeteer库对Python环境有要求,故要求Python3.6+\n\n### 结构\n\n利用 Flask 写的简单Web展示,前端使用了部分layui的样式等。考虑到Flask在用装饰器定义路径时比较方便,如果使用Django或者Tornado显得更有条理一些。\n\nWeb 展示和爬虫部分是分开的。Spider模块里是一些爬虫的脚本,将爬虫得到的数据存储到Mongo,然后Web展示则是从Mongo里面读取数据。\n\n### 界面\n\n简单的界面如下:\n\n![](https://ws3.sinaimg.cn/large/006tKfTcly1g1nfxvawrij31mi0u07av.jpg)\n\n爬虫目前抓取了这几个数据源,后期可根据需要进行维护,添加自定义抓取的数据源。\n\n![](https://ws1.sinaimg.cn/large/006tKfTcly1g1ng04b7mjj31mj0u0dpp.jpg)\n\n数据展示分了两个 Tab,按照时间顺序分今日和历史存储的数据进行展示,另外配合搜索框进行检索。\n\n### 爬虫\n\n其实爬虫的数据是沿用了之前的一些,这里做了一些修改进行数据存储然后Web展示而已。\n\nSeebug 涉及到一些反爬,在搜索一些前人经验时,发现利用execjs执行整个js代码块时随着反爬升级总是会出现一些问题。于是想起去年先知白帽大会猪猪侠讲的Web 2.0启发式爬虫,其实有用到Pyppeteer这个库。其实这是node.js的Puppeteer库对Python的支持,使用这种方法暂时不会被反爬拦截,但是这个库要求Python3.6+。\n\n另外WeChat的爬虫是调用了搜狗微信的接口,但是也会遇到一些反爬的拦截,在前面的文章中补充了绕过反爬的方法,调用打码平台解封IP,生成Cookie。还有暗网的爬虫会涉敏,就不公布了。\n\n更多可参考博客poochi.cn\n\n" }, { "alpha_fraction": 0.6311941742897034, "alphanum_fraction": 0.6409423351287842, "avg_line_length": 23.639999389648438, "blob_id": "5228fde3f4fc9e86555e1e68f3d93b24b1742d92", "content_id": "d137c10201b2b8a832ba13f8c6a42f7d61188579", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1259, "license_type": "no_license", "max_line_length": 73, "num_lines": 50, "path": "/spider/get_anquanke_info.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "import requests\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport os\n\n\n\nnowtime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n\ndef anquanke_info():\n\turl = 'https://www.anquanke.com/'\n\t#keywords = config.keywords#关注的关键字\n\twordlist = []\n\tselect_msg = ''\n\tres = requests.get(url,timeout=60)\n\t#print(res.text)\n\tsoup = bs(res.text,'html.parser')\n\tdivs = soup.find_all('div',{'class':'title'})[0:9]\n\tspans = soup.find_all('span',{'class':'date'})\n\t#print(divs)\n\ti = 0 \n\tlist_date = [span.find('span').text for span in spans]\n\t#print(list_date)\n\tfor div in divs:\n\t\tcontent = div.find('a').string\n\t\t#print(description)\n\t\tsite = 'https://www.anquanke.com' + div.find('a')['href']\n\t\t#print(site)\n\t\tdata = {'time':list_date[i],'from':'安全客','content':content,'link':site}\n\t\ti +=1\n\t\tDBNAME = ''\n\t\tDBUSERNAME = ''\n\t\tDBPASSWORD = ''\n\t\tDB = '' #数据库地址\n\t\tPORT = 27017\n\t\tdb_conn = MongoClient(DB, PORT)\n\t\tna_db = getattr(db_conn, DBNAME)\n\t\tna_db.authenticate(DBUSERNAME, DBPASSWORD)\n\t\tc = na_db.anquankedatas\n\t\tc.update_one({\"content\": data['content']}, {'$set': data}, True)\n\t\twordlist.append(data)\n\t#print(wordlist)\n\treturn wordlist\n\t\t\t\t\n\t\t\t\t\n\n\nif __name__ == '__main__':\n\tanquanke_info()" }, { "alpha_fraction": 0.6400420665740967, "alphanum_fraction": 0.6489753127098083, "avg_line_length": 41.31111145019531, "blob_id": "c58134e24018cf0d1029ef3b90c376d831922239", "content_id": "f3e2140af6156d2ed87bc5d5714447dd03d3ef18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1913, "license_type": "no_license", "max_line_length": 116, "num_lines": 45, "path": "/spider/get_seebug_info.py", "repo_name": "zzhacked/securityMonitor", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n# encoding: utf-8\nimport asyncio\nimport requests\nfrom pymongo import MongoClient\nfrom bs4 import BeautifulSoup as bs\nfrom pyppeteer import launch\nasync def main():\n browser = await launch()\n page = await browser.newPage()\n await page.goto('http://www.seebug.org')\n await page.waitFor(\"body > div.footer-up\")\n #print(urls)\n wordlist = []\n vuln_time_elements = await page.xpath('//td[@class=\"text-center datetime hidden-sm hidden-xs td-time\"]')\n vuln_post_time = [await (await item.getProperty('textContent')).jsonValue() for item in vuln_time_elements][:10]\n #print(vuln_post_time)\n poc_time_elements = await page.xpath('//td[@class=\"td-time datetime hidden-sm hidden-xs\"]')\n poc_post_time = [await (await item.getProperty('textContent')).jsonValue() for item in poc_time_elements][1:]\n #print(poc_post_time)\n vulns_elements = await page.xpath('//td[@class=\"vul-title-wrapper\"]')\n vuln_content = [await(await item.getProperty('textContent')).jsonValue() for item in vulns_elements][:10]\n vuln_link_elements = await page.xpath('//td[@class=\"vul-title-wrapper\"]/a')\n vuln_link = [await(await item.getProperty('href')).jsonValue() for item in vuln_link_elements][:10]\n #print(vuln_link)\n for i in range(10):\n vuln_data = {'time':vuln_post_time[i],'link':vuln_link[i],'from':'Seebug','content':vuln_content[i]}\n DBNAME = ''\n DBUSERNAME = ''\n DBPASSWORD = ''\n DB = '' #数据库地址\n PORT = 27017\n db_conn = MongoClient(DB, PORT)\n na_db = getattr(db_conn, DBNAME)\n na_db.authenticate(DBUSERNAME, DBPASSWORD)\n c = na_db.seebugdatas\n c.update_one({\"content\": vuln_data['content']}, {'$set': vuln_data}, True)\n wordlist.append(vuln_data)\n print(wordlist)\n \n \n \n\n await browser.close()\nasyncio.get_event_loop().run_until_complete(main())" } ]
7
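The record above (zzhacked/securityMonitor) repeats the same MongoDB query pattern in every Flask route: one `find` filtered on a date prefix for the "today" tab and one unfiltered `find` for the history tab, both sorted newest-first. A minimal sketch of that pattern factored into a helper — the collection names and the `'time'` string format follow the `app.py` shown above, while the helper name, database name and MongoClient URI are placeholder assumptions, not taken from the repository:

```python
# Sketch of the "today vs. history" query pattern used by the routes above (assumptions noted).
import re
import time
from pymongo import MongoClient

def today_and_history(collection):
    """Return (today's documents, all documents), both sorted newest-first on 'time'."""
    today_prefix = time.strftime('%Y-%m-%d', time.localtime(time.time()))
    today = list(collection.find({'time': re.compile(today_prefix)}).sort('time', -1))
    history = list(collection.find().sort('time', -1))
    return today, history

if __name__ == '__main__':
    db = MongoClient('mongodb://localhost:27017')['securitydb']  # placeholder URI and db name
    seebugs_today, seebugs_all = today_and_history(db.seebugdatas)
    print(len(seebugs_today), len(seebugs_all))
```

Factoring the query out this way keeps each per-source route down to a template name and a collection handle.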
uatkhan/TEST
https://github.com/uatkhan/TEST
e75224338dbc84058d890188f43fe187a5f29111
61aed5c30e7aed1573a931052cd99f6d834a204c
7a4998d5d59ea005ccb289db794fccc2da2e51b9
refs/heads/master
2020-04-02T02:37:22.765618
2018-10-20T16:46:59
2018-10-20T16:46:59
153,918,649
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5472702980041504, "alphanum_fraction": 0.5605858564376831, "avg_line_length": 20.428571701049805, "blob_id": "d881b63e7aaaf32d7fa2071041a5e56eb40030fe", "content_id": "56325526faca3b15469e84cd2a91ca2268bbd4c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 48, "num_lines": 35, "path": "/main.py", "repo_name": "uatkhan/TEST", "src_encoding": "UTF-8", "text": "from flask import Flask,jsonify\n\nclass User:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n def serialize(self):\n return {\n 'name': self.name,\n 'age': self.age,\n }\n\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \"Hello World!\"\n\[email protected](\"/user\", methods = [\"GET\"])\ndef get_user():\n yerbolat = User(name = \"Yerbolat\", age = 33)\n return jsonify(yerbolat.serialize())\n\[email protected](\"/users\", methods = [\"GET\"])\ndef get_users():\n userlist = []\n user1 = User(\"Vasya\", 18)\n user2 = User(\"John\", 28)\n userlist.append(user1.serialize())\n userlist.append(user2.serialize())\n return jsonify(userlist)\n\nif __name__ == \"__main__\":\n app.run()\n\n" } ]
1
macqueen09/Mnist_use_tensorflow
https://github.com/macqueen09/Mnist_use_tensorflow
e5005b716e8f71094bc24df87a1c3a7d52ebebbe
1328b2f4bab1375391990188b3984f9428c80719
186bb896c02d565fbc70052e723dffde6723601f
refs/heads/master
2021-04-27T02:36:37.988668
2018-02-24T03:26:04
2018-02-24T03:26:04
122,698,591
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6340388059616089, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 26.621952056884766, "blob_id": "b93efae11eb800ff570b10f7aa1da83b496fe9d9", "content_id": "c1cf78d85f46188218f39442bff7260edf26e1fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2268, "license_type": "no_license", "max_line_length": 80, "num_lines": 82, "path": "/mni_tens_all.py", "repo_name": "macqueen09/Mnist_use_tensorflow", "src_encoding": "UTF-8", "text": "\n\n## 1 ready data\n\n# download data\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\n\n# # pylint: disable=unused-import\n# import gzip\n# import os\n# import tempfile\n\n# import numpy\n# from six.moves import urllib\n# from six.moves import xrange # pylint: disable=redefined-builtin\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n## 2 run Interactive Session\nimport tensorflow as tf\nsess = tf.InteractiveSession()\n\n## softmax model\nx = tf.placeholder(\"float\", shape = [None, 784])\ny_ = tf.placeholder(\"float\", shape = [None, 10])\n\nW = tf.Variable(tf.zeros([784,10]))\nb = tf.Variable(tf.zeros([10]))\n\nsess.run(tf.initialize_all_variables())\n\n## 3 predict and loss\ny = tf.nn.softmax(tf.matmul(x,W) + b)\ncross_entropy = -tf.reduce_sum(y_ * tf.log(y))\n\n## train\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\nfor i in range(1000):\n\tbatch = mnist.train.next_batch(50)\n\ttrain_step.run(feed_dict = {x: batch[0], y_: batch[1]})\n\ncorrect_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\nprint accuracy.eval(feed_dict = {x: mnist.test.images, y_: mnist.test.labels})\n## evaluation\n\ndef weight_variable(shape):\n\tinitial = tf.truncated_normal(shape, stddev = 0.1)\n\treturn tf.Variable(initial)\n\ndef bias_variable(shape):\n\tinitial = tf.constant(0.1, shape = shape)\n\treturn tf.Variable(initial)\n\ndef conv2d(x, W):\n\treturn tf.nn.conv2d(x, W, strides = [1,1,1,1], padding = 'SAME')\n\ndef max_pool_2x2(x):\n\treturn tf.nn.max_pool(x, ksize = [1,2,2,1], strides[1,2,2,1], padding = 'SAME')\n\n## a1 conv\nW_conv1 = weight_variable([5,5,1,32])\nb_conv1 = bias_variable([32])\n\nx_image = tf.reshape(x, [-1,28,28,1])\n\nh_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\nh_pool1 = max_pool_2x2(h_conv1)\n\n# conv2\nW_conv2 = weight_variable([5,5,32,64])\nb_conv2 = bias_variable([64])\n\nh_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)\nh_pool2 = max_pool_2x2(h_conv2)\n\nW_fc1 = weight_variable([7 * 7 * 64, 1024])\nb_fc = bias_variable([1024])\n\nh_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*64])\nf_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)\n\n" }, { "alpha_fraction": 0.8307692408561707, "alphanum_fraction": 0.8307692408561707, "avg_line_length": 31.5, "blob_id": "54d9bc16f617ca8b3158d0108205f67478c17fce", "content_id": "d24bb69ea8e57226831aa8acd12255f2ad95faae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 65, "license_type": "no_license", "max_line_length": 41, "num_lines": 2, "path": "/README.md", "repo_name": "macqueen09/Mnist_use_tensorflow", "src_encoding": "UTF-8", "text": "# Mnist_use_tensorflow\nuse DeepLearning method make mnist better\n" } ]
2
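The `mni_tens_all.py` file in the record above will not run as written: `max_pool_2x2` drops the `=` in `strides=[1, 2, 2, 1]`, and the fully-connected bias is created as `b_fc` but used as `b_fc1`. A corrected sketch of those two pieces, assuming the TensorFlow 1.x `tf.nn` API the file is written against:

```python
# Corrected sketch of the pooling helper and fully-connected stage from mni_tens_all.py above.
# Assumes TensorFlow 1.x (tf.placeholder, tf.truncated_normal, tf.nn.max_pool), as in the original.
import tensorflow as tf

def weight_variable(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def max_pool_2x2(x):
    # 2x2 max pooling; strides must be passed as an argument, not indexed
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

# Fully-connected layer on top of the second pooled feature map (7x7x64)
W_fc1 = weight_variable([7 * 7 * 64, 1024])
b_fc1 = bias_variable([1024])  # one consistent name instead of b_fc / b_fc1
h_pool2 = tf.placeholder(tf.float32, shape=[None, 7, 7, 64])  # stand-in for the conv/pool output
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
```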
Pelielo/nd027-capstone-project
https://github.com/Pelielo/nd027-capstone-project
95fcc7b060ca6585e645587e6b661d6ea7868619
8544760ba73b1d2d3c3edfc1da607fee59cef48c
80d1dff0cedae991475012026ced7e5c958046b9
refs/heads/master
2022-11-18T00:18:02.884895
2020-04-13T16:31:56
2020-04-13T16:31:56
249,069,320
0
0
null
2020-03-21T22:07:56
2020-04-13T16:32:05
2022-11-11T07:52:17
Python
[ { "alpha_fraction": 0.7626532912254333, "alphanum_fraction": 0.76633220911026, "avg_line_length": 75.02542114257812, "blob_id": "9ddf29b332740c14e37b269f256e8b18bf8bc69a", "content_id": "d26a8f4d772fdae8949bd7e1adefc35d4ef47e5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8970, "license_type": "no_license", "max_line_length": 505, "num_lines": 118, "path": "/README.md", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "# City Recommendation System\n\nThis is the **capstone project** of Udacity's [Data Engineering Nanodegree](https://www.udacity.com/course/data-engineer-nanodegree--nd027).\n\n## Introduction\n\nThe purpose of this project is to serve as a recommendation system for cities of the United States, taking several factores into consideration and ultimately recommending the best city to live in for someone.\n\nIt is a data pipeline orchestrated by **Airflow** consisting of extracting data from diverse sources, storing them in a **S3 Bucket** and then processing them using **Spark** into a **Data Warehouse** in **Redshift**. The populated database can be used with ad-hoc queries to gather insight into the best features of each city or be used as a ranking system to evaluate the best city overall tanking into account the person's interest.\n\n## Resources\n\n* [`create_cluster.py`](./create_cluster.py) is a script to create a Redshift cluster (and everything else necessary) using the provided credentials in the `settings.cfg` file.\n* [`delete_cluster.py`](./delete_cluster.py) is a script to delete the Redshift cluster and remove the role created to access S3 buckets.\n* [`create_tables.py`](./create_tables.py) is a script to (re)create the dimension and fact tables for the star schema in Redshift.\n* [`sql_statements.py`](./sql_statements.py) is used to define the SQL statements which will be imported into [`create_tables.py`](./create_tables.py) file.\n* [`docker-compose.dev.yml`](./docker-compose.dev.yml) is a docker-compose file to create an Airflow instance running the webserver, a Postgres database, a Spark master node and a Spark worker node to be used for local development.\n* [`cities.py`](./airflow/dags/cities.py) is a DAG file to download US city data, upload to S3, process using Spark and load into Redshift.\n* [`employment.py`](./airflow/dags/employment.py) is a DAG file to download US employment data, upload to S3, process using Spark and load into Redshift.\n* [`weather.py`](./airflow/dags/weather.py) is a DAG file to download US weather data, upload to S3, process using Spark and load into Redshift.\n* The `settings.cfg` file should have the following structure and its values must be filled in.\n\n```ini\n[AWS]\nKEY=\nSECRET=\n\n[CLUSTER] \nCLUSTER_TYPE=multi-node\nNUM_NODES=4\nNODE_TYPE=dc2.large\n\nIAM_ROLE_NAME=\nCLUSTER_IDENTIFIER=\nHOST=\nDB_NAME=\nDB_USER=\nDB_PASSWORD=\nDB_PORT=\n\n[IAM_ROLE]\nARN=\n```\n## Data Sources\n\n* City and Population\n * Source: [simplemaps](https://simplemaps.com/data/us-cities)\n * Information: city, state, state_code, country, latitude, longitude, population, density.\n* Employment numbers\n * Source: [US Government Bureau of Labor Statistics](https://www.bls.gov/help/hlpforma.htm#SM)\n * Information: people_employed, state, year, month\n* Weather\n * Source: [Kaggle dataset for Land Temperatures](https://www.kaggle.com/berkeleyearth/climate-change-earth-surface-temperature-data#GlobalLandTemperaturesByCity.csv) and [OpenCage 
geocoding API](https://opencagedata.com/api) to retrieve a state from a geocode.\n * Information: avg_temp, avg_temp_uncertainty, city, state, date\n \n## Getting Started\n\n* Run [`docker-compose.dev.yml`](./docker-compose.dev.yml) to setup the environment, containing an Airflow instance and Spark nodes.\n * Add connections to airflow\n * `s3_conn`: access key and secret key to upload files to S3 bucket.\n * `aws_credentials`: access key and secret access key to read S3 information from Spark and to manage Redshift instance.\n * Add variables to airflow\n * `kaggle_username`: username of the Kaggle user to download dataset.\n * `kaggle_api_key`: key used to authenticate to Kaggle API.\n * `bls_api_key`: key used to authenticate to BLS API and avoid daily request limits\n * `opencage_api_key`: key used to authenticate to OpenCage API.\n* Run [`create_cluster.py`](./create_cluster.py) to setup a Redshift cluster or create an instance via AWS console.\n* Run [`create_tables.py`](./create_tables.py) to create the tables used in Redshift.\n\nAlternatively, in a **production setting**, one could use a **EC2 instance** running Airflow and a **EMR cluster** to connect to.\n\n## Data Pipeline\n\nThe Data Pipeline is orchestrated by Airflow using three DAGs:\n* [`cities.py`](./airflow/dags/cities.py) begins by downloading a `.zip` file, extracting its contents into a `.csv` file, uploading to a S3 bucket, which is then processed by Spark to filter unused rows, choose the appropriate columns and rename when necessary. The processed data is then uploaded to S3 so that a `COPY` command can be executed, loading the `dim_cities` table in Redshift. The final task is used as a data quality check to ensure that everything was loaded correctly.\n* [`employment.py`](./airflow/dags/employment.py) DAG works similarly, but the data source is a Web API that returns `.json` structured data. The `spark_processor` task is also slightly different because it needs to explode the deeply nested data resulted from the `POST` call to the API. 
The data is retrieved **monthly**, as the source is updated.\n* [`weather.py`](./airflow/dags/weather.py) is also similar, but the source is a Kaggle dataset and the `spark_processor` task has to filter the data from United States and since the dataset only has city names (that can have duplicates across states), it needs to make a request to the OpenCage API using a lat, long tuple to return the state located in those coordinates.\n\n![Cities DAG](./resources/cities_dag.png)\n![Employment DAG](./resources/employment_dag.png)\n![Weather DAG](./resources/weather_dag.png)\n\nSince the current data sources do not update daily, it isn't necessary to increase the DAG run frequency, but with the addition of more data sources (more frequently updated), this data pipeline model is generic enough to easily fit these types of daily or even hourly-updated sources, as it is possible to upload the data to S3 (that could work as a Data Lake for raw information) and then load it into Redshift for a more structured organization.\n\n## Data Model\n\nThe data model chosen to represent the data processed by the pipeline is a star schema in a Data Warehouse so that certain ad-hoc queries are possible to analyze the data, but there are also different **distribution styles** and **sortkeys** in order for the data to be able to scale without losing too much performance.\n\n* `dim_cities` uses `diststyle all`, since it is small enough (<5MB) to fit in all Redshift nodes and this way provides a fast way to access this dimension data and avoiding shuffling when using `JOIN`s.\n* `fact_weather` uses `distkey` on `state`, since queries will mostly be made against cities and states and `sortkey` on `date` to be able to filter certain dates and make aggregations more performant.\n* `fact_employment` uses `distkey` on `state` and a `compound sortkey` on `(year, month)` for similiar reasons as `fact_weather`.\n\n![Star Schema](./resources/table_schema.png)\n\n## Scaling Data volume\n\nThe architecture chosen can scale well, as the tables are distributed in a way to reduce the number of partitions read by each query. It also takes into account the number of concurrent access it needs to handle, since Redshift can ditribute the processing into its nodes for each request.\n\nSpark can handle the volume increase as well, using parallel processing by following a similar distribution logic as the Redshift tables.\n\n* **Increasing data by 100x**\n * If the data intake was increased by 100x, it would be wise to use **Parquet** files and partition the data stored into **S3** so that Spark can parallelize the processing. Airflow could fetch the data more frequently to disperse the processing over time and therefore soften both concentrated reads and writes. The table model would not need too much changes, since it can easily store and provide access to much more data than it currently holds using the sort methods and distrubution styles chosen.\n* **Running the data pipeline daily at 7 am**\n * If the pipeline was run daily, it would still work as expected, since Spark would process the daily amount of data and Redshift would receive the data normally. If for some reason there is an error during the execution of the DAG, each task can retry **3** times before the DAG failing. 
If it does fail, a dashboard consuming the data from Redshift would not fail to render, but would not have the most up to date information, showing only the data from the day before.\n* **Handling 100+ people connecting to the database**\n * If the number of people connected concurrently to the database increased to 100+, a possible strategy to soften the hit on the reads would be to provide **a single or few points of connection**, for example a dashboard that handles certain queries.\n\n## Future Work\n\nThis system could be further enriched with more data sources, such as\n\n* Safety\n* Cost of living\n* Housing indicators\n* Health care\n* Traffic\n* Crime \n* Pollution" }, { "alpha_fraction": 0.649718701839447, "alphanum_fraction": 0.6615961790084839, "avg_line_length": 28.262195587158203, "blob_id": "619cb14aaacd32628f6a148539e77de80973ad9c", "content_id": "cf6f88c18cb90d938a5586a932a1bdbe404fcba9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4799, "license_type": "no_license", "max_line_length": 156, "num_lines": 164, "path": "/airflow/dags/cities.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import logging\nimport os\nfrom datetime import datetime, timedelta\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\n\nfrom airflow import DAG\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.operators import (\n CopyToRedshiftOperator,\n DownloadAndUnzip,\n LoadS3,\n DataQualityOperator,\n)\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\nCITIES_VALIDATION = {\n \"query\": \"select count(*) from dim_cities where city is null\",\n \"result\": 0,\n}\n\ndefault_args = {\n \"owner\": \"pelielo\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2020, 1, 1),\n \"retries\": 3,\n \"retry_delay\": timedelta(minutes=5),\n \"catchup\": False,\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n}\n\ndag = DAG(\n \"cities\",\n default_args=default_args,\n description=\"Loads data to S3 and processes it via Spark into Redshift\",\n schedule_interval=\"@once\",\n)\n\n\ndef spark_job(ds, **kwargs):\n \"\"\"\n Processes all of the necessary steps using Spark and is used as entrypoint for a Python Operator task.\n Can use Airflow context.\n \"\"\"\n\n os.environ[\"AWS_ACCESS_KEY_ID\"] = (\n AwsHook(\"aws_credentials\").get_credentials().access_key\n )\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = (\n AwsHook(\"aws_credentials\").get_credentials().secret_key\n )\n\n logging.info(\"Loaded AWS credentials to environment variables\")\n\n def create_spark_session():\n \"\"\"\n Creates a spark session connecting to the master node and adds necessary packages.\n \"\"\"\n\n spark = (\n SparkSession.builder.master(\"spark://spark-master:7077\")\n .appName(\"cities_processor\")\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\n .getOrCreate()\n )\n return spark\n\n spark = create_spark_session()\n\n logging.info(\"Created spark session\")\n\n input_data_path = \"s3a://udacity-dend-14b1/capstone-project/\"\n\n # get path to cities data file\n cities_data_path = \"cities/uscities.csv\"\n\n # read cities data file\n df = spark.read.csv(input_data_path + cities_data_path, header=True)\n\n logging.info(f\"Read {input_data_path + cities_data_path} into spark dataframe\")\n\n geography_df = (\n df.selectExpr(\n \"city_ascii as city\",\n \"state_name as state\",\n 
\"state_id as state_code\",\n \"'United States of America' as country\",\n \"lat as latitude\",\n \"lng as longitude\",\n \"population\"\n \"density\", # population per square kilometer\n )\n .filter(\n col(\"state_name\").isin([\"Puerto Rico\", \"District of Columbia\"])\n == False # using only the 50 states\n )\n .dropDuplicates()\n )\n\n logging.info(\"Filtered dataframe and renamed columns\")\n\n logging.info(\"Resulting dataframe:\")\n geography_df.show(10)\n\n geography_df.toPandas().to_csv(\"uscities-processed.csv\", header=True, index=False)\n\n logging.info(\"Dumped dataframe to CSV file in local filesystem\")\n\n\nstart_operator = DummyOperator(task_id=\"Begin_execution\", dag=dag)\n\ndownload_and_unizp_csv = DownloadAndUnzip(\n task_id=\"download_and_unzip\",\n dag=dag,\n url=\"https://simplemaps.com/static/data/us-cities/1.6/basic/simplemaps_uscities_basicv1.6.zip\",\n files_to_extract=[\"uscities.csv\"],\n)\n\nupload_to_s3_raw = LoadS3(\n task_id=\"upload_to_s3_raw\",\n dag=dag,\n filename=\"uscities.csv\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/cities/uscities.csv\",\n)\n\nspark_processor = PythonOperator(\n task_id=\"spark_processor\", dag=dag, python_callable=spark_job, provide_context=True\n)\n\nupload_to_s3_processed = LoadS3(\n task_id=\"upload_to_s3_processed\",\n dag=dag,\n filename=\"uscities-processed.csv\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/cities/uscities-processed.csv\",\n)\n\ncopy_redshift = CopyToRedshiftOperator(\n task_id=\"copy_to_redshift\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n aws_credentials_id=\"aws_credentials\",\n table=\"dim_cities\",\n column_list=\"city, state, state_code, country, latitude, longitude, population, density\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/cities/uscities-processed.csv\",\n)\n\nquality_checks = DataQualityOperator(\n task_id=\"data_quality_checks\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n queries_and_results=[CITIES_VALIDATION],\n)\n\nend_operator = DummyOperator(task_id=\"Stop_execution\", dag=dag)\n\nstart_operator >> download_and_unizp_csv >> upload_to_s3_raw >> spark_processor >> upload_to_s3_processed >> copy_redshift >> quality_checks >> end_operator\n" }, { "alpha_fraction": 0.6466367840766907, "alphanum_fraction": 0.6515694856643677, "avg_line_length": 25.547618865966797, "blob_id": "54481160498d5401fa39de7d209eb7a0343bb7eb", "content_id": "ae420f113ac33fd0825c90b65b3f5e81c188dcce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2230, "license_type": "no_license", "max_line_length": 86, "num_lines": 84, "path": "/delete_cluster.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport boto3\nimport configparser\nimport time\n\n\ndef create_client(service_name, region_name, key, secret):\n print(f\"Creating {service_name} client\")\n\n client = boto3.client(\n service_name=service_name,\n region_name=region_name,\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n )\n\n return client\n\n\ndef prettyRedshiftProps(props):\n pd.set_option(\"display.max_colwidth\", -1)\n keysToShow = [\n \"ClusterIdentifier\",\n \"NodeType\",\n \"ClusterStatus\",\n \"MasterUsername\",\n \"DBName\",\n \"Endpoint\",\n \"NumberOfNodes\",\n \"VpcId\",\n ]\n x = [(k, v) for k, v in props.items() if k in keysToShow]\n return pd.DataFrame(data=x, columns=[\"Key\", 
\"Value\"])\n\n\ndef check_cluster_properties(redshift_client, cluster_identifier):\n props = redshift_client.describe_clusters(ClusterIdentifier=cluster_identifier)[\n \"Clusters\"\n ][0]\n return prettyRedshiftProps(props)\n\n\ndef kill_cluster(redshift_client, cluster_identifier, skip_snapshot):\n print(\"Deleting Redshift cluster in 10 seconds\")\n #### CAREFUL!!\n time.sleep(10)\n redshift_client.delete_cluster(\n ClusterIdentifier=cluster_identifier, SkipFinalClusterSnapshot=skip_snapshot\n )\n #### CAREFUL!!\n print(\"Cluster is being deleted...\")\n\n\ndef delete_role(iam_client, role_name):\n print(\"Detaching role policy and deleting role\")\n #### CAREFUL!!\n iam_client.detach_role_policy(\n RoleName=role_name, PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam_client.delete_role(RoleName=role_name)\n #### CAREFUL!!\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read_file(open(\"dwh.cfg\"))\n\n KEY = config.get(\"AWS\", \"KEY\")\n SECRET = config.get(\"AWS\", \"SECRET\")\n\n DWH_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", \"CLUSTER_IDENTIFIER\")\n\n DWH_IAM_ROLE_NAME = config.get(\"CLUSTER\", \"IAM_ROLE_NAME\")\n\n redshift = create_client(\"redshift\", \"us-east-1\", KEY, SECRET)\n iam = create_client(\"iam\", \"us-east-1\", KEY, SECRET)\n\n kill_cluster(redshift, DWH_CLUSTER_IDENTIFIER, skip_snapshot=True)\n\n delete_role(iam, DWH_IAM_ROLE_NAME)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7010752558708191, "alphanum_fraction": 0.7032257914543152, "avg_line_length": 23.473684310913086, "blob_id": "b0115ba78ad5ba60152af66d63f2424939d695ab", "content_id": "6863eb3cafc72aec424c37537c2ecc10e2c282e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/airflow/plugins/__init__.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "from __future__ import absolute_import, division, print_function\n\nimport helpers\nimport operators\nfrom airflow.plugins_manager import AirflowPlugin\n\n\n# Defining the plugin class\nclass CapstonePlugin(AirflowPlugin):\n name = \"capstone_plugin\"\n operators = [\n operators.LoadS3,\n operators.DownloadAndUnzip,\n operators.CopyToRedshiftOperator,\n operators.DataQualityOperator\n ]\n # helpers = [\n # helpers.SqlQueries\n # ]\n" }, { "alpha_fraction": 0.6102532148361206, "alphanum_fraction": 0.6355774998664856, "avg_line_length": 29.547170639038086, "blob_id": "59a937801e18f0c8f5d619dfce3aadd8c4b02168", "content_id": "af40c7f29d212d7e6f90cc5d2e1b764155d059b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 86, "num_lines": 53, "path": "/airflow/plugins/operators/load_s3.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "from airflow.hooks.S3_hook import S3Hook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass LoadS3(BaseOperator):\n \"\"\"\n Custom Airflow Operator to load a file from the local filesystem to a S3 Bucket.\n\n :param filename: name of the source file. 
Can be formatted used Airflow `context`.\n :type filename: str\n\n :param s3_credentials_id: Connection to S3 containing Access Key and Secret Key.\n Must be configured as a S3 Connection via the Airflow webserver UI.\n :type s3_credentials_id: str\n\n :param s3_bucket: Name of the source S3 bucket.\n :type s3_bucket: str\n\n :param s3_key: Remaining path to the source file in S3.\n :type s3_key: str\n \"\"\"\n\n ui_color = \"#03f4fc\"\n\n template_fields = (\"filename\", \"s3_key\")\n\n @apply_defaults\n def __init__(\n self,\n filename=\"\",\n s3_credentials_id=\"\",\n s3_bucket=\"\",\n s3_key=\"\",\n *args,\n **kwargs,\n ):\n\n super(LoadS3, self).__init__(*args, **kwargs)\n self.filename = filename\n self.s3_credentials_id = s3_credentials_id\n self.s3_bucket = s3_bucket\n self.s3_key = s3_key\n\n def execute(self, context):\n rendered_filename = self.filename.format(**context)\n rendered_s3_key = self.s3_key.format(**context)\n\n s3_path = \"s3://{}/{}\".format(self.s3_bucket, rendered_s3_key)\n self.log.info(f\"Uploading file {rendered_filename} to {s3_path}\")\n\n s3 = S3Hook(self.s3_credentials_id)\n s3.load_file(rendered_filename, rendered_s3_key, self.s3_bucket, replace=True)\n" }, { "alpha_fraction": 0.6513317227363586, "alphanum_fraction": 0.6772014498710632, "avg_line_length": 35.328704833984375, "blob_id": "4bc46d3a20e7a9c9b8a7a5eaff0cf9f941a57f96", "content_id": "43a3623478e8969e6072e84418d2e529125790bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7847, "license_type": "no_license", "max_line_length": 294, "num_lines": 216, "path": "/airflow/dags/employment.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport os\nfrom datetime import datetime, timedelta\n\nimport pyspark.sql.functions as F\nimport requests\nfrom dateutil.parser import parse\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.types import (DecimalType, IntegerType)\n\nfrom airflow import DAG, AirflowException\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.models import Variable\nfrom airflow.operators import LoadS3, CopyToRedshiftOperator, DataQualityOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\nEMPLOYMENT_VALIDATION = {\n \"query\": \"select count(*) from dim_cities where people_employed is null\",\n \"result\": 0,\n}\n\ndefault_args = {\n \"owner\": \"pelielo\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2019, 10, 1),\n \"retries\": 3,\n \"retry_delay\": timedelta(minutes=5),\n \"catchup\": False,\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n}\n\ndag = DAG(\n \"employment\",\n default_args=default_args,\n description=\"Loads data to S3 and processes it via Spark into Redshift\",\n schedule_interval=\"@monthly\",\n)\n\n\ndef http_job(ds, **kwargs):\n \"\"\"\n Downloads monthly employment data from the US Bureau of Labor Statistics Web API.\n Sends a POST request to the timeseries endpoint and specifies the data\n requested on the body of the request. 
Uses Airflow context to download\n data from specific dates referencing the DAG execution date.\n\n `bls_api_key` is not necessary to retrieve data but using it\n avoids the daily unauthenticated API limits.\n \"\"\"\n\n # gets the DAG execution date\n reference_date = parse(ds).date()\n\n start_year = reference_date.year\n end_year = reference_date.year\n\n bls_api_key = Variable.get(\"bls_api_key\")\n\n prefix = \"SM\" # State and Area Employment\n seasonal_adjustment_code = \"U\" # Not seasonally adjusted\n state_codes = [\"01\",\"02\",\"04\",\"05\",\"06\",\"08\",\"09\",\"10\",\"12\",\"13\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"23\",\"24\",\"25\",\"26\",\"27\",\"28\",\"29\",\"30\",\"31\",\"32\",\"33\",\"34\",\"35\",\"36\",\"37\",\"38\",\"39\",\"40\",\"41\",\"42\",\"44\",\"45\",\"46\",\"47\",\"48\",\"49\",\"50\",\"51\",\"53\",\"54\",\"55\",\"56\"] # All 50 USA state codes\n area_code = \"00000\" # State-wide\n supersector_industry_code = \"00000000\" # Total non-farm\n data_type_code = \"01\" # All Employees, In Thousands\n\n series_id = []\n for state_code in state_codes:\n series_id = series_id + [\n (\n prefix\n + seasonal_adjustment_code\n + state_code\n + area_code\n + supersector_industry_code\n + data_type_code\n )\n ]\n\n url = \"https://api.bls.gov/publicAPI/v2/timeseries/data/\"\n payload = {\n \"seriesid\": series_id,\n \"startyear\": start_year,\n \"endyear\": end_year,\n \"catalog\": True,\n \"calculations\": True,\n \"registrationkey\": bls_api_key,\n }\n headers = {\"Content-type\": \"application/json\"}\n\n logging.info(f\"Sending request to url {url} with payload {payload}\")\n response = requests.request(\"POST\", url, headers=headers, data=json.dumps(payload))\n\n logging.info(f\"Request returned with status code {response.status_code}\")\n if response.status_code != 200:\n raise AirflowException\n\n filename = f\"employment{reference_date.year}{reference_date.month:02d}.json\"\n\n logging.info(f\"Writing {filename} to filesystem\")\n with open(filename, \"w\") as outfile:\n json.dump(json.loads(response.text), outfile)\n\n\ndef spark_job(ds, **kwargs):\n \"\"\"\n Processes all of the necessary steps using Spark and is used as entrypoint for a Python Operator task.\n Can use Airflow context.\n \"\"\"\n\n os.environ[\"AWS_ACCESS_KEY_ID\"] = AwsHook(\"aws_credentials\").get_credentials().access_key\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = AwsHook(\"aws_credentials\").get_credentials().secret_key\n\n logging.info(\"Loaded AWS credentials to environment variables\")\n\n def create_spark_session():\n \"\"\"\n Creates a spark session connecting to the master node and adds necessary packages.\n \"\"\"\n\n spark = (\n SparkSession.builder.master(\"spark://spark-master:7077\")\n .appName(\"cities_processor\")\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\n .getOrCreate()\n )\n return spark\n\n spark = create_spark_session()\n\n logging.info(\"Created spark session\")\n\n input_data_path = \"s3a://udacity-dend-14b1/capstone-project/\"\n\n # get path to employment data file of the specific month\n reference_date = parse(ds).date()\n employment_data_path = f\"employment/{reference_date.year}{reference_date.month:02d}/employment{reference_date.year}{reference_date.month:02d}.json\"\n\n # read employment data file, \n df = spark.read.json(input_data_path + employment_data_path)\n\n logging.info(f\"Read {input_data_path + employment_data_path} into spark dataframe\")\n\n # runs several explode commands to retrieve deeply nested data 
from json\n series_df = df.select(F.explode(df.Results.series).alias(\"series\"))\n state_data_df = series_df.select(series_df.series.catalog.area.alias(\"state\"), F.explode(series_df.series.data).alias(\"data\"))\n employment_df = state_data_df.select(\n state_data_df.state,\n state_data_df.data.year.alias(\"year\"),\n (state_data_df.data.period)[2:3].cast(IntegerType()).alias(\"month\"),\n ((((state_data_df.data.value))).cast(DecimalType()) * 1000).alias(\"people_employed\"))\n\n logging.info(\"Retrieved the necessary data and renamed columns\")\n\n logging.info(\"Resulting dataframe:\")\n employment_df.show(10)\n\n employment_df.toPandas().to_csv(f\"employment{reference_date.year}{reference_date.month:02d}-processed.csv\", header=True, index=False)\n\n logging.info(\"Dumped dataframe to CSV file in local filesystem\")\n\n\nstart_operator = DummyOperator(task_id=\"Begin_execution\", dag=dag)\n\nhttp_request = PythonOperator(\n task_id=\"http_request\", dag=dag, python_callable=http_job, provide_context=True\n)\n\nupload_to_s3_raw = LoadS3(\n task_id=\"upload_to_s3_raw\",\n dag=dag,\n filename=\"employment{execution_date.year}{execution_date.month:02d}.json\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/employment/{execution_date.year}{execution_date.month:02d}/employment{execution_date.year}{execution_date.month:02d}.json\",\n)\n\nspark_processor = PythonOperator(\n task_id=\"spark_processor\",\n dag=dag,\n python_callable=spark_job,\n provide_context=True\n)\n\nupload_to_s3_processed = LoadS3(\n task_id=\"upload_to_s3_processed\",\n dag=dag,\n filename=\"employment{execution_date.year}{execution_date.month:02d}-processed.csv\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/employment/{execution_date.year}{execution_date.month:02d}/employment{execution_date.year}{execution_date.month:02d}-processed.csv\",\n)\n\ncopy_redshift = CopyToRedshiftOperator(\n task_id=\"copy_to_redshift\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n aws_credentials_id=\"aws_credentials\",\n table=\"fact_employment\",\n column_list=\"state, year, month, people_employed\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/employment/{execution_date.year}{execution_date.month:02d}/employment{execution_date.year}{execution_date.month:02d}-processed.csv\",\n)\n\nquality_checks = DataQualityOperator(\n task_id=\"data_quality_checks\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n queries_and_results=[EMPLOYMENT_VALIDATION],\n)\n\nend_operator = DummyOperator(task_id=\"Stop_execution\", dag=dag)\n\nstart_operator >> http_request >> upload_to_s3_raw >> spark_processor >> upload_to_s3_processed >> copy_redshift >> quality_checks >> end_operator\n" }, { "alpha_fraction": 0.5755135416984558, "alphanum_fraction": 0.5809367299079895, "avg_line_length": 26.533937454223633, "blob_id": "8cc5be4a2a56e6afbfdf4f5fb9801dc5885ea407", "content_id": "9731c761604d245d7d7d5d8facca8bb1d7529638", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6085, "license_type": "no_license", "max_line_length": 88, "num_lines": 221, "path": "/create_cluster.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import configparser\nimport json\nimport time\n\nimport boto3\nimport pandas as pd\nimport schedule\n\n\ndef create_client(service_name, region_name, key, secret):\n print(f\"Creating {service_name} client\")\n\n client = boto3.client(\n 
service_name=service_name,\n region_name=region_name,\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n )\n\n return client\n\n\ndef create_resource(service_name, region_name, key, secret):\n print(f\"Creating {service_name} resource\")\n\n client = boto3.resource(\n service_name=service_name,\n region_name=region_name,\n aws_access_key_id=key,\n aws_secret_access_key=secret,\n )\n\n return client\n\n\ndef create_iam_role(iam_client, iam_role_name):\n # Create the role\n try:\n print(\"Creating a new IAM Role\")\n iam_client.create_role(\n Path=\"/\",\n RoleName=iam_role_name,\n Description=\"Allows Redshift clusters to call AWS services on your behalf.\",\n AssumeRolePolicyDocument=json.dumps(\n {\n \"Statement\": [\n {\n \"Action\": \"sts:AssumeRole\",\n \"Effect\": \"Allow\",\n \"Principal\": {\"Service\": \"redshift.amazonaws.com\"},\n }\n ],\n \"Version\": \"2012-10-17\",\n }\n ),\n )\n except Exception as e:\n print(e)\n\n print(\"Attaching Policy\")\n\n iam_client.attach_role_policy(\n RoleName=iam_role_name,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\",\n )[\"ResponseMetadata\"][\"HTTPStatusCode\"]\n\n print(\"Getting the IAM role ARN\")\n roleArn = iam_client.get_role(RoleName=iam_role_name)[\"Role\"][\"Arn\"]\n\n print(f\"Role ARN: {roleArn}\")\n\n return roleArn\n\n\ndef create_cluster(\n redshift_client,\n roleArn,\n cluster_type,\n node_type,\n num_nodes,\n db_name,\n cluster_identifier,\n db_user,\n db_password,\n):\n try:\n print(\n f\"\"\"Creating Redshift cluster with following properties:\n cluster type: {cluster_type}, node type: {node_type}, \n number of nodes: {num_nodes}, DB name: {db_name}, \n cluster identifier: {cluster_identifier}\"\"\"\n )\n\n redshift_client.create_cluster(\n # HW\n ClusterType=cluster_type,\n NodeType=node_type,\n NumberOfNodes=int(num_nodes),\n # Identifiers & Credentials\n DBName=db_name,\n ClusterIdentifier=cluster_identifier,\n MasterUsername=db_user,\n MasterUserPassword=db_password,\n # Roles (for s3 access)\n IamRoles=[roleArn],\n )\n\n print(\"Cluster starting...\")\n except Exception as e:\n print(e)\n\n\ndef prettyRedshiftProps(props):\n pd.set_option(\"display.max_colwidth\", -1)\n keysToShow = [\n \"ClusterIdentifier\",\n \"NodeType\",\n \"ClusterStatus\",\n \"MasterUsername\",\n \"DBName\",\n \"Endpoint\",\n \"NumberOfNodes\",\n \"VpcId\",\n ]\n x = [(k, v) for k, v in props.items() if k in keysToShow]\n return pd.DataFrame(data=x, columns=[\"Key\", \"Value\"])\n\n\ndef check_cluster_properties(redshift_client, cluster_identifier):\n return redshift_client.describe_clusters(ClusterIdentifier=cluster_identifier)[\n \"Clusters\"\n ][0]\n\n\ndef authorize_cluster_access(ec2_resource, vpc_id, db_port):\n print(f\"Authorizing inbound access to port {db_port}\")\n try:\n vpc = ec2_resource.Vpc(id=vpc_id)\n defaultSg = list(vpc.security_groups.all())[0]\n\n print(f\"Default Security Group: {defaultSg}\")\n\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp=\"0.0.0.0/0\",\n IpProtocol=\"TCP\",\n FromPort=int(db_port),\n ToPort=int(db_port),\n )\n except Exception as e:\n print(e)\n\n\ndef main():\n config = configparser.ConfigParser()\n config.read_file(open(\"settings.cfg\"))\n\n KEY = config.get(\"AWS\", \"KEY\")\n SECRET = config.get(\"AWS\", \"SECRET\")\n\n DWH_CLUSTER_TYPE = config.get(\"CLUSTER\", \"CLUSTER_TYPE\")\n DWH_NUM_NODES = config.get(\"CLUSTER\", \"NUM_NODES\")\n DWH_NODE_TYPE = config.get(\"CLUSTER\", \"NODE_TYPE\")\n\n DWH_CLUSTER_IDENTIFIER = config.get(\"CLUSTER\", 
\"CLUSTER_IDENTIFIER\")\n DWH_DB = config.get(\"CLUSTER\", \"DB_NAME\")\n DWH_DB_USER = config.get(\"CLUSTER\", \"DB_USER\")\n DWH_DB_PASSWORD = config.get(\"CLUSTER\", \"DB_PASSWORD\")\n DWH_PORT = config.get(\"CLUSTER\", \"DB_PORT\")\n\n DWH_IAM_ROLE_NAME = config.get(\"CLUSTER\", \"IAM_ROLE_NAME\")\n\n redshift = create_client(\"redshift\", \"us-east-1\", KEY, SECRET)\n iam = create_client(\"iam\", \"us-east-1\", KEY, SECRET)\n ec2 = create_resource(\"ec2\", \"us-east-1\", KEY, SECRET)\n\n roleArn = create_iam_role(iam, DWH_IAM_ROLE_NAME)\n\n create_cluster(\n redshift,\n roleArn,\n DWH_CLUSTER_TYPE,\n DWH_NODE_TYPE,\n DWH_NUM_NODES,\n DWH_DB,\n DWH_CLUSTER_IDENTIFIER,\n DWH_DB_USER,\n DWH_DB_PASSWORD,\n )\n\n def ensure_cluster_available():\n cluster_props = check_cluster_properties(redshift, DWH_CLUSTER_IDENTIFIER)\n\n # print(cluster_props)\n\n cluster_status = cluster_props[\"ClusterStatus\"]\n\n if cluster_status == \"available\":\n print(\"Redshift cluster available!\")\n return schedule.CancelJob\n\n schedule.every(30).seconds.do(ensure_cluster_available)\n\n while True:\n schedule.run_pending()\n if schedule.jobs == []:\n break\n time.sleep(5)\n\n cluster_props = check_cluster_properties(redshift, DWH_CLUSTER_IDENTIFIER)\n\n DWH_ENDPOINT = cluster_props[\"Endpoint\"][\"Address\"]\n DWH_ROLE_ARN = cluster_props[\"IamRoles\"][0][\"IamRoleArn\"]\n print(f\"DWH_ENDPOINT: {DWH_ENDPOINT}\")\n print(f\"DWH_ROLE_ARN: {DWH_ROLE_ARN}\")\n\n authorize_cluster_access(ec2, cluster_props[\"VpcId\"], DWH_PORT)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6430768966674805, "alphanum_fraction": 0.6467692255973816, "avg_line_length": 30.25, "blob_id": "c9458aafa125091a649a130b2a0325d764cb1ef3", "content_id": "948188d8520311cfd1a823370d0a86372ab6c736", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1625, "license_type": "no_license", "max_line_length": 95, "num_lines": 52, "path": "/airflow/plugins/operators/download_and_unzip.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import logging\nfrom zipfile import ZipFile\n\nimport requests\n\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass DownloadAndUnzip(BaseOperator):\n \"\"\"\n Custom Airflow Operator to download a file via http and unzip specific files to filesystem.\n\n :param url: Url of the source file do download\n :type url: str\n\n :param files_to_extract: List of file names to extract from the downloaded file.\n :type files_to_extract: list(str)\n \"\"\"\n\n ui_color = \"#03f4fc\"\n\n @apply_defaults\n def __init__(self, url=\"\", files_to_extract=[\"\"], *args, **kwargs):\n\n super(DownloadAndUnzip, self).__init__(*args, **kwargs)\n self.url = url\n self.files_to_extract = files_to_extract\n\n def download_url(self, url, save_path, chunk_size=128):\n r = requests.get(url, stream=True)\n with open(save_path, \"wb\") as fd:\n for chunk in r.iter_content(chunk_size=chunk_size):\n fd.write(chunk)\n\n def extract_file_from_zip(self, source_file, files_to_extract):\n # Create a ZipFile Object\n with ZipFile(source_file, \"r\") as zip_obj:\n # Extract the necessary files\n for file in files_to_extract:\n zip_obj.extract(file)\n\n def execute(self, context):\n temp_file = \"temp.zip\"\n\n logging.info(f\"Downloading file from {self.url} and saving to {temp_file}\")\n\n self.download_url(self.url, temp_file)\n\n logging.info(f\"Extracting files {self.files_to_extract} 
from {temp_file}\")\n\n self.extract_file_from_zip(temp_file, self.files_to_extract)\n" }, { "alpha_fraction": 0.6126601099967957, "alphanum_fraction": 0.6209495067596436, "avg_line_length": 29.86046600341797, "blob_id": "7e0c1cdfc5196ecbb90ab9825553c1491bafdafe", "content_id": "11ce5ad16bd7f2ef205427a6548ccfa1af18a875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2654, "license_type": "no_license", "max_line_length": 90, "num_lines": 86, "path": "/airflow/plugins/operators/copy_redshift.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "from airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass CopyToRedshiftOperator(BaseOperator):\n \"\"\"\n Custom Airflow Operator to copy a csv file to a Redshift table using COPY SQL command.\n Uses AWS Access Key and Secret Key as authentication method.\n\n :param redshift_conn_id: Connection ID used to connect to Redshift database. \n Must be configured as a Postgres Connection via the Airflow webserver UI.\n :type redshift_conn_id: str\n\n :param aws_credentials_id: Connection to AWS containing Access Key and Secret Key.\n Must be configured as a AWS Connection via the Airflow webserver UI.\n :type aws_credentials_id: str\n\n :param table: Table name of the destination\n :type table: str\n\n :param column_list: Comma separated list of the destination column names used\n to map the csv to the table.\n :type column_list: str\n\n :param s3_bucket: Name of the source S3 bucket.\n :type s3_bucket: str\n\n :param s3_key: Remaining path to the source file in S3.\n :type s3_key: str\n \"\"\"\n\n ui_color = \"#03f4fc\"\n\n template_fields = (\"s3_key\",)\n\n copy_sql = \"\"\"\n COPY {}\n ({})\n FROM '{}'\n ACCESS_KEY_ID '{}'\n SECRET_ACCESS_KEY '{}'\n CSV\n IGNOREHEADER 1;\n \"\"\"\n\n @apply_defaults\n def __init__(\n self,\n redshift_conn_id=\"\",\n aws_credentials_id=\"\",\n table=\"\",\n column_list=\"\",\n s3_bucket=\"\",\n s3_key=\"\",\n *args,\n **kwargs,\n ):\n\n super(CopyToRedshiftOperator, self).__init__(*args, **kwargs)\n self.table = table\n self.redshift_conn_id = redshift_conn_id\n self.s3_bucket = s3_bucket\n self.s3_key = s3_key\n self.aws_credentials_id = aws_credentials_id\n self.column_list = column_list\n\n def execute(self, context):\n aws_hook = AwsHook(self.aws_credentials_id)\n credentials = aws_hook.get_credentials()\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n\n rendered_key = self.s3_key.format(**context)\n self.log.info(\n f\"Copying data from {rendered_key} to Redshift table {self.table}\"\n )\n s3_path = \"s3://{}/{}\".format(self.s3_bucket, rendered_key)\n formatted_sql = CopyToRedshiftOperator.copy_sql.format(\n self.table,\n self.column_list,\n s3_path,\n credentials.access_key,\n credentials.secret_key,\n )\n redshift.run(formatted_sql)\n" }, { "alpha_fraction": 0.7767295837402344, "alphanum_fraction": 0.7861635088920593, "avg_line_length": 27.909090042114258, "blob_id": "14f3e4e00aa17de470101479ccd1a63ab14edf4e", "content_id": "3cd00138799835d30a80689a680a2bc842a364e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 318, "license_type": "no_license", "max_line_length": 58, "num_lines": 11, "path": "/airflow/plugins/operators/__init__.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": 
"UTF-8", "text": "from operators.copy_redshift import CopyToRedshiftOperator\nfrom operators.download_and_unzip import DownloadAndUnzip\nfrom operators.load_s3 import LoadS3\nfrom operators.data_quality import DataQualityOperator\n\n__all__ = [\n 'LoadS3',\n 'DownloadAndUnzip',\n 'CopyToRedshiftOperator',\n 'DataQualityOperator'\n]\n" }, { "alpha_fraction": 0.6527514457702637, "alphanum_fraction": 0.6565464735031128, "avg_line_length": 35.76744079589844, "blob_id": "dc4c2246ff0ccb5b84a6bce0507f91e82b7c3319", "content_id": "01dfec45d433a961797cb8ad1303f75659b0f00c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1581, "license_type": "no_license", "max_line_length": 81, "num_lines": 43, "path": "/airflow/plugins/operators/data_quality.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\n\nclass DataQualityOperator(BaseOperator):\n \"\"\"\n Custom Airflow Operator to run data quality checks. Runs the query provided\n and compares its result with the expected result. If there isn't a match,\n raises an `Exception`.\n\n :param redshift_conn_id: Connection ID used to connect to Redshift database. \n Must be configured as a Postgres Connection via the Airflow webserver UI.\n :type redshift_conn_id: str\n\n :param queries_and_results: List of maps where each item holds a query string\n to be executed and the expected result.\n :type queries_and_results: list(map('query': str, 'result': int))\n \"\"\"\n\n ui_color = \"#0384fc\"\n\n @apply_defaults\n def __init__(\n self,\n redshift_conn_id=\"\",\n queries_and_results=[{\"query\": \"\", \"result\": 0}],\n *args,\n **kwargs,\n ):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.redshift_conn_id = redshift_conn_id\n self.queries_and_results = queries_and_results\n\n def execute(self, context):\n redshift = PostgresHook(postgres_conn_id=self.redshift_conn_id)\n for quality_check in self.queries_and_results:\n self.log.info(\"Running data validation query\")\n result = redshift.get_first(quality_check[\"query\"])\n self.log.info(f\"result: {result}\")\n if result[0] != quality_check[\"result\"]:\n raise ValueError\n" }, { "alpha_fraction": 0.4642857015132904, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 13.5, "blob_id": "d6374485c5c4260bb89e97dd08fc384789051c3c", "content_id": "aef27696db51a5cfb80121105a30236b72ef19b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 28, "license_type": "no_license", "max_line_length": 14, "num_lines": 2, "path": "/requirements.txt", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "kaggle==1.5.6\npyspark==2.4.5" }, { "alpha_fraction": 0.6620515584945679, "alphanum_fraction": 0.6697568297386169, "avg_line_length": 31.960317611694336, "blob_id": "847f35974912e02ca19e7649d52361fdf938b5a1", "content_id": "28e684a0adccc0f484691e07c56427b2176a097a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8306, "license_type": "no_license", "max_line_length": 160, "num_lines": 252, "path": "/airflow/dags/weather.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "import logging\nimport json\nimport os\nfrom datetime import datetime, timedelta\nfrom zipfile import ZipFile\n\nimport 
requests\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import udf\nfrom pyspark.sql.types import DoubleType\n\nfrom airflow import DAG\nfrom airflow.contrib.hooks.aws_hook import AwsHook\nfrom airflow.models import Variable\nfrom airflow.operators import LoadS3, CopyToRedshiftOperator, DataQualityOperator\nfrom airflow.operators.dummy_operator import DummyOperator\nfrom airflow.operators.python_operator import PythonOperator\n\nos.environ[\"KAGGLE_USERNAME\"] = Variable.get(\"kaggle_username\")\nos.environ[\"KAGGLE_KEY\"] = Variable.get(\"kaggle_api_key\")\n\nimport kaggle # uses KAGGLE_USERNAME and KAGGLE_KEY\n\nWEATHER_VALIDATION = {\n \"query\": \"select count(*) from fact_weather where avg_temp is null\",\n \"result\": 0,\n}\n\ndefault_args = {\n \"owner\": \"pelielo\",\n \"depends_on_past\": False,\n \"start_date\": datetime(2020, 1, 1),\n \"retries\": 3,\n \"retry_delay\": timedelta(minutes=5),\n \"catchup\": False,\n \"email_on_failure\": False,\n \"email_on_retry\": False,\n}\n\n\ndag = DAG(\n \"weather\",\n default_args=default_args,\n description=\"Loads data to S3 and processes it via Spark into Redshift\",\n schedule_interval=\"@once\",\n)\n\n\ndef fetch_job():\n \"\"\"\n Downloads a dataset file from Kaggle using Kaggle's API authenticated with\n `KAGGLE_USERNAME` and `KAGGLE_KEY` envioronment variables. Then, unzips the\n necessary file and saves it to the local filesystem.\n \"\"\"\n\n def download_and_unzip(dataset_owner, dataset_name, save_path, files_to_extract):\n kaggle.api.dataset_download_files(\n dataset=f\"{dataset_owner}/{dataset_name}\", path=save_path, force=True\n )\n extract_file_from_zip(f\"{dataset_name}.zip\", files_to_extract)\n\n def extract_file_from_zip(source_file, files_to_extract):\n # create a ZipFile Object\n with ZipFile(source_file, \"r\") as zip_obj:\n # extract the necessary files\n for file in files_to_extract:\n zip_obj.extract(file)\n\n dataset_owner = \"berkeleyearth\"\n dataset_name = \"climate-change-earth-surface-temperature-data\"\n save_path = \"./\"\n files_to_extract = [\"GlobalLandTemperaturesByCity.csv\"]\n\n logging.info(\"Authenticating to Kaggle\")\n kaggle.api.authenticate() # uses KAGGLE_USERNAME and KAGGLE_KEY\n\n logging.info(\"Downloading dataset file and unzipping into filesystem\")\n download_and_unzip(dataset_owner, dataset_name, save_path, files_to_extract)\n\n\ndef spark_job(ds, **kwargs):\n \"\"\"\n Processes all of the necessary steps using Spark and is used as entrypoint for a Python Operator task.\n Can use Airflow context.\n \"\"\"\n\n os.environ[\"AWS_ACCESS_KEY_ID\"] = (\n AwsHook(\"aws_credentials\").get_credentials().access_key\n )\n os.environ[\"AWS_SECRET_ACCESS_KEY\"] = (\n AwsHook(\"aws_credentials\").get_credentials().secret_key\n )\n\n logging.info(\"Loaded AWS credentials to environment variables\")\n\n opencage_api_key = Variable.get(\"opencage_api_key\")\n\n def create_spark_session():\n spark = (\n SparkSession.builder.master(\"spark://spark-master:7077\")\n .appName(\"cities_processor\")\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\")\n .config(\"spark.sql.broadcastTimeout\", 1200)\n .getOrCreate()\n )\n return spark\n\n def concat_coordinates(latitude, longitude):\n return str(latitude) + \",\" + str(longitude)\n\n def reverse_geocode(latitude, longitude, api_key):\n \"\"\"\n Uses Open Cage Web API to reverse geocode a tuple of\n latitude and logitude, resulting in the state in which\n those coordinates are located.\n\n :param latitude: the latitude 
part of the coordinate\n :type latitude: str\n\n :param longitude: the longitude part of the coordinate\n :type longitude: str\n\n :param api_key: OpenCage's API key to authenticate requests\n :type api_key: str\n \"\"\"\n\n url = \"https://api.opencagedata.com/geocode/v1/json\"\n lat_long = concat_coordinates(latitude, longitude)\n params = {\"q\": lat_long, \"key\": api_key}\n return requests.request(\"GET\", url, params=params)\n\n def state_parser(json_str):\n \"\"\"\n Parses the response of the reverse geocode request and extracts the state name.\n\n :param json_str: json response to be parsed\n :type json_str: str\n \"\"\"\n try:\n return json.loads(json_str.text)[\"results\"][0][\"components\"][\"state\"]\n except KeyError:\n return None\n\n spark = create_spark_session()\n\n logging.info(\"Created spark session\")\n\n input_data_path = \"s3a://udacity-dend-14b1/capstone-project/\"\n\n # get path to weather data file\n weather_data_path = \"weather/usweather.csv\"\n\n # read weather data file\n df = spark.read.csv(input_data_path + weather_data_path, header=True)\n\n geo_udf = udf(\n lambda x: float(x[0:-1]) if (x[-1] == \"N\" or x[-1] == \"E\") else -float(x[0:-1]),\n DoubleType(),\n )\n round_udf = udf(lambda x: round(float(x), 3), DoubleType())\n location_enricher_udf = udf(\n lambda lat, long: state_parser(reverse_geocode(lat, long, opencage_api_key))\n )\n\n # filters and cleans dataset\n cleaned_df = df.select(\n round_udf(df.AverageTemperature).alias(\"avg_temp\"),\n round_udf(df.AverageTemperatureUncertainty).alias(\"avg_temp_uncertainty\"),\n df.City.alias(\"city\"),\n geo_udf(df.Latitude).alias(\"latitude\"),\n geo_udf(df.Longitude).alias(\"longitude\"),\n df.dt.alias(\"date\"),\n ).where((df.Country == \"United States\") & df.AverageTemperature.isNotNull())\n\n # separetes the distinct coordinates to request reverse geocoding\n location_df = cleaned_df.select(\n cleaned_df.latitude.alias(\"latitude\"), cleaned_df.longitude.alias(\"longitude\")\n ).dropDuplicates()\n\n enriched_df = location_df.withColumn(\n \"state\", location_enricher_udf(location_df.latitude, location_df.longitude)\n )\n\n # joins back the source dataframe with the enriched dataframe containing state names\n weather_df = cleaned_df.join(\n enriched_df,\n (\n (cleaned_df.latitude == enriched_df.latitude)\n & (cleaned_df.longitude == enriched_df.longitude)\n ),\n how=\"inner\",\n ).drop(\"latitude\", \"longitude\")\n\n logging.info(\"Filtered dataframe and renamed columns\")\n\n logging.info(\"Resulting dataframe:\")\n weather_df.show(10)\n\n weather_df.toPandas().to_csv(\"usweather-processed.csv\", header=True, index=False)\n\n logging.info(\"Dumped dataframe to CSV file in local filesystem\")\n\n\nstart_operator = DummyOperator(task_id=\"Begin_execution\", dag=dag)\n\ndownload_dataset_and_unizp = PythonOperator(\n task_id=\"download_dataset_and_unizp\", dag=dag, python_callable=fetch_job\n)\n\nupload_to_s3_raw = LoadS3(\n task_id=\"upload_to_s3_raw\",\n dag=dag,\n filename=\"GlobalLandTemperaturesByCity.csv\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/weather/weather.csv\",\n)\n\nspark_processor = PythonOperator(\n task_id=\"spark_processor\", dag=dag, python_callable=spark_job, provide_context=True\n)\n\nupload_to_s3_processed = LoadS3(\n task_id=\"upload_to_s3_processed\",\n dag=dag,\n filename=\"usweather-processed.csv\",\n s3_credentials_id=\"s3_conn\",\n s3_bucket=\"udacity-dend-14b1\",\n 
s3_key=\"capstone-project/weather/usweather-processed.csv\",\n)\n\ncopy_redshift = CopyToRedshiftOperator(\n task_id=\"copy_to_redshift\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n aws_credentials_id=\"aws_credentials\",\n table=\"fact_weather\",\n column_list=\"avg_temp, avg_temp_uncertainty, city, date, state\",\n s3_bucket=\"udacity-dend-14b1\",\n s3_key=\"capstone-project/weather/usweather-processed.csv\",\n)\n\nquality_checks = DataQualityOperator(\n task_id=\"data_quality_checks\",\n dag=dag,\n redshift_conn_id=\"redshift\",\n queries_and_results=[WEATHER_VALIDATION],\n)\n\nend_operator = DummyOperator(task_id=\"Stop_execution\", dag=dag)\n\nstart_operator >> download_dataset_and_unizp >> upload_to_s3_raw >> spark_processor >> upload_to_s3_processed >> copy_redshift >> quality_checks >> end_operator\n" }, { "alpha_fraction": 0.6451402306556702, "alphanum_fraction": 0.6464448571205139, "avg_line_length": 25.894737243652344, "blob_id": "b20e927890e6594cec4ca73b7f61aae8584fe28c", "content_id": "bf182f0a17f9305316de81e0770dc776cf4d6b07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1533, "license_type": "no_license", "max_line_length": 83, "num_lines": 57, "path": "/sql_statements.py", "repo_name": "Pelielo/nd027-capstone-project", "src_encoding": "UTF-8", "text": "# DROP TABLES\n\ncities_table_drop = \"drop table if exists dim_cities\"\nweather_table_drop = \"drop table if exists fact_weather\"\nemployment_table_drop = \"drop table if exists fact_employment\"\n\n# CREATE TABLES\n\ncities_table_create = \"\"\"\n create table if not exists dim_cities (\n city_id int identity(1, 1),\n city varchar not null,\n state varchar not null,\n state_code varchar not null,\n country varchar not null,\n latitude double precision not null,\n longitude double precision not null,\n population int not null,\n density double precision null,\n primary key (city_id)\n )\n diststyle all;\n\"\"\"\n\nweather_table_create = \"\"\"\n create table if not exists fact_weather (\n avg_temp double precision not null,\n avg_temp_uncertainty double precision null,\n city varchar not null,\n state varchar not null,\n date date not null,\n primary key (date, state, city)\n )\n distkey (state)\n sortkey (date);\n\"\"\"\n\nemployment_table_create = \"\"\"\n create table if not exists fact_employment (\n people_employed int null,\n state varchar not null,\n year int not null,\n month int not null,\n primary key (state, year, month)\n )\n distkey (state)\n compound sortkey (year, month);\n\"\"\"\n\n# QUERY LISTS\n\ncreate_table_queries = [\n cities_table_create,\n weather_table_create,\n employment_table_create,\n]\ndrop_table_queries = [cities_table_drop, weather_table_drop, employment_table_drop]\n" } ]
14
LiorMahfoda/Mutable-Recursive-List
https://github.com/LiorMahfoda/Mutable-Recursive-List
68d6cb400c1bf6e2169de0ca43b4737dab60a5ce
6d3d4deaa83bf2c2593960dc76a1b6e7ba7f888e
d8e7eabafd8aefb86c6597e62ebf659912893ea7
refs/heads/master
2020-09-11T14:55:43.590453
2019-11-16T13:29:23
2019-11-16T13:29:23
222,103,305
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5052316784858704, "alphanum_fraction": 0.5136253833770752, "avg_line_length": 27.240259170532227, "blob_id": "685a4f6c6fa8014754e1370df1d8cf15686b7b5d", "content_id": "edf61754fd22cae365996aeb48e466ff7ee255f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8697, "license_type": "no_license", "max_line_length": 140, "num_lines": 308, "path": "/HW3_302782233_201621141.py", "repo_name": "LiorMahfoda/Mutable-Recursive-List", "src_encoding": "UTF-8", "text": "\"Task 1\"\n\"-------------------------------------------------------\"\n\ndef make_date(year,month,day):\n '''\nfunction returns date sections according to given message:\nyear - the year in number\nmonth - from number to word\nday - the day in number\nstr - string representation of the date\n '''\n def dispatch(m):\n if m =='day':\n return day\n elif m == 'month':\n return months(month)\n elif m =='year':\n return year\n\n def months(msg): \n if msg==1:\n return 'January'\n elif msg==2:\n return 'February' \n elif msg==3:\n return 'March'\n elif msg==4:\n return 'April' \n elif msg==5:\n return 'May' \n elif msg==6:\n return 'June' \n elif msg==7:\n return 'July' \n elif msg==8:\n return 'August' \n elif msg==9:\n return 'September'\n elif msg==10:\n return 'October'\n elif msg==11:\n return 'November'\n elif msg==12:\n return 'December' \n return dispatch\n \ndef year(f):\n return f('year')\n\ndef month(f):\n return f('month')\n\ndef day(f):\n return f('day')\n\ndef str_date(f):\n return '{0}th of {1}, {2}'.format(year(f),month(f),day(f))\n \n \n\nd = make_date(2016, 12, 26)\nprint(d)\nprint(year(d))\nprint(month(d))\nprint(day(d))\nprint(str_date(d))\n\n\n\"-------------------------------------------------------------\"\n\"\"\" Task 2 \"\"\"\n\"-------------------------------------------------------------\"\n\ndef data_preprocessing_file_types(str):\n \"enumerating the data + cleaning the data + complete missing values\"\n str=map(lambda x: x if x[-1:]!='.' else x+\"txt\" ,map(lambda x: \".\".join(x.split(\"..\")),str.split(\";\")))\n str=tuple(map(lambda x: x.split(\".\"),(filter(lambda x: \".\" in x,str))))\n return Counter(map(lambda x: x[1],str)).most_common()\n\ndef data_preprocessing_tree(str):\n \"enumerating the data + cleaning the data + complete missing values\"\n str=map(lambda x: x if x[-1:]!='.' 
else x+\"txt\" ,map(lambda x: \".\".join(x.split(\"..\")),str.split(\";\")))\n \"creates pairs of file path and file name\"\n str=map(lambda x: tuple(x),map(lambda x:\"\".join(x).replace('/f',\"/ f\").split() if x.count(\".\") == 1 else (x, None),str))\n \"creates the files tree\"\n str=map(lambda x:(x[0],tuple(map(lambda x: x[1],filter(lambda file: True if file[0] == x[0] else False, str)))), str)\n return list(set(str))\n\ndata=\"/User/someuser/file.py;/tmp/download/file.zip;/tmp/download/file2.zip;/;/usr/local/bin;/User/someuser/file..py;/tmp/file.;/usr//some;\"\n\n\n\"-------------------------------------------------------------\"\n\"\"\" Task 3 \"\"\"\n\"-------------------------------------------------------------\"\n\ndef make_currency(amount,symbol):\n '''\n function returns sections according to given message:\n get_value - value of amount/symbol \n set_value - update value of amount\n str - string of type\n convert - convert given amount to another currency\n '''\n def dispatch(msg):\n if msg == 'get_value':\n return get_value\n elif msg == 'set_value':\n return set_value\n elif msg == 'str':\n return str(amount,symbol)\n elif msg == 'convert':\n return convert\n \n def get_value(m):\n nonlocal amount,symbol\n if m =='amount':\n return amount\n elif m =='symbol':\n return symbol\n\n def set_value(m,new):\n nonlocal amount\n if m == 'amount':\n amount = new\n \n def str(symbol,amount):\n return repr('{0}{1}'.format(symbol,amount))\n \n def convert(func,symbol):\n nonlocal amount\n amount = func(amount) \n\n return dispatch\n\nc = make_currency(10.50, '$')\nprint(c('get_value')('amount'))\nprint(c('get_value')('symbol'))\nc('set_value')('amount', 50)\nprint(c('get_value')('amount'))\nprint(c('str'))\nc('convert')(lambda x: x*3.87,'Shekel')\nprint(c('str'))\n\n\"\"\"----------------------------------------\"\"\"\n\"\"\" Task 4 \"\"\"\n\"\"\"----------------------------------------\"\"\"\ndef get_reverse_map_iterator(s = [],g = lambda x:x):\n '''\nfunction input: sequence and lambda function\nreturn value: dictionary with messages: \nnext - next value in the Iterator \nhas_more - boolean funcion which return T/F if there is next value \n '''\n index = 0\n records = list(s)\n records.reverse\n def next():\n nonlocal index\n if index>= len(records):\n return 'no more items'\n item = g(records[index])\n index+=1\n print(item)\n return item\n \n def has_more():\n return index < len(records)\n\n dispatch = {'next': next, 'has_more': has_more}\n return dispatch\n\nit = get_reverse_map_iterator((1,3,6), lambda x: 1/x)\nwhile it['has_more']():\n it['next']()\n\nit = get_reverse_map_iterator((1,3,6))\nfor i in range(1,6):\n it['next']()\n\n\"\"\"-------------------------------------------------\"\"\"\n\"\"\"Task 5\"\"\"\n\"\"\"-------------------------------------------------\"\"\"\nimport copy\n\nempty_rlist = None\ndef make_rlist(first, rest):\n \"\"\"Make a recursive list from its first element and the rest.\"\"\"\n return (first, rest)\ndef first(s):\n \"\"\"Return the first element of a recursive list s.\"\"\"\n return s[0]\ndef rest(s):\n \"\"\"Return the rest of the elements of a recursive list s.\"\"\"\n return s[1]\ndef len_rlist(s):\n \"\"\"Return the length of recursive list s.\"\"\"\n length = 0\n while s != empty_rlist:\n s, length = rest(s), length + 1\n return length\ndef getitem_rlist(s, i):\n \"\"\"Return the element at index i of recursive list s.\"\"\"\n while i > 0:\n s, i = rest(s), i - 1\n return first(s)\n\ndef make_mutable_rlist(tcopy = None):\n \"\"\"Return a 
functional implementation of a mutable recursive list.\"\"\"\n    contents = empty_rlist \n    if tcopy != None:\n        rlist = make_mutable_rlist()\n        L=tcopy['str']()\n        tmp=[]\n        \n        for i in range(len(L)):\n            tmp.append(L[i])\n        tmp.reverse()\n        \n        for i in range(len(L)):\n            rlist['push_first'](tmp[i])\n        return rlist\n    \n    def length():\n        \"\"\"Return the length of the list.\"\"\"\n        return len_rlist(contents)\n    \n    def get_item(ind):\n        \"\"\"Return the item at index ind of the list.\"\"\"\n        return getitem_rlist(contents, ind)\n    \n    def push_first(value):\n        \"\"\"Put a value at the beginning of the list.\"\"\"\n        nonlocal contents\n        contents = make_rlist(value, contents)\n    \n    def pop_first():\n        \"\"\"Remove and return the first element in the list.\"\"\"\n        nonlocal contents\n        f = first(contents)\n        contents = rest(contents)\n        return f\n\n    def str():\n        \"\"\"Create a Python list from the mutable recursive list.\"\"\"\n        L=[]\n        for x in range(length()):\n            L.append(get_item(x))\n        return L \n\n    def extend(rlist):\n        \"\"\"Take another mutable recursive list, convert it to a list and\n        append its elements to the end of this mutable recursive list.\"\"\"\n        nonlocal contents\n        lst= str() + rlist['str']()\n        lst.reverse()\n        contents=None\n        \n        for i in range(len(lst)):\n            push_first(lst[i])\n        \n    def Slice (First,Last):\n        \"\"\"Take first and last indices, build a new mutable recursive list\n          containing the sliced elements and return it.\"\"\"\n        sliced = make_mutable_rlist()\n        tmp=[]\n        \n        for i in range(First,Last):\n            tmp.append(str()[i])\n        tmp.reverse()\n        \n        for i in range(First,Last):\n            sliced['push_first'](tmp[i])\n        return sliced\n    \n    def get_iterator():\n        L=str()\n        i = 0\n        \n        def hasNext():\n            nonlocal i\n            return (i < len(L))\n        \n        def next():\n            nonlocal i,L\n            if hasNext():\n                nextt = L[i]\n                i+=1\n                return nextt\n            return None\n        \n        return {'next': next, 'hasNext': hasNext}\n    \n    return {'length':length, 'get_item':get_item, 'push_first':push_first, \n            'pop_first': pop_first, 'str':str, 'slice':Slice,'extend':extend,'get_iterator':get_iterator}\n\n# \"print\"\n# my_list= make_mutable_rlist()\n# for x in range(4):\n#     my_list['push_first'](x)\n# print(my_list['str']())\n# ext= make_mutable_rlist(my_list)\n# my_list['extend'](ext)\n# print(my_list['str']())\n# print(my_list['slice'](0,2)['str']())\n# yout_list = make_mutable_rlist(my_list)\n# print(yout_list['str']())\n# it=my_list['get_iterator']()\n# while it['hasNext']():\n#     print(it['next']())" } ]
1
lbwa/hello-python
https://github.com/lbwa/hello-python
34faa1cc194768dea905c39224350e9b3b23a25b
b36567dd1ce9f2d14345ed40e24c49c2b9a33ce4
866b924b7ca9852b68b478f51c6c4608e19d17db
refs/heads/master
2020-06-13T16:12:46.139701
2019-07-03T01:14:00
2019-07-03T01:14:00
194,706,332
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6520000100135803, "alphanum_fraction": 0.6639999747276306, "avg_line_length": 21.727272033691406, "blob_id": "36b3c17fdb469d5e51cff9157e0787f151db725d", "content_id": "bc310aae26e2b10bb51214f490bb60a45138b4e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 59, "num_lines": 11, "path": "/shell.py", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# DOC: https://docs.python.org/3/library/subprocess.html\n\nimport sys\nimport subprocess\n\nif len(sys.argv) >= 2:\n subprocess.run(['node', sys.argv[1]], shell=True)\nelse:\n print('\\nParameter is required, eg. \\'./shell.py -v\\'')\n" }, { "alpha_fraction": 0.5709342360496521, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 22.432432174682617, "blob_id": "94bbe22621d9a7a45a4efee8133079377db6397f", "content_id": "f616c299d9e3283edde5073e12c6f69431d778de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 867, "license_type": "no_license", "max_line_length": 63, "num_lines": 37, "path": "/DEF_BMI.py", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# Body mass index(BMI)\n# wiki: https://en.wikipedia.org/wiki/Body_mass_index\n\nimport sys\n\nprint('\\nBMI calculator function, build with python v3.7.3\\n')\n\n\ndef computeBMI(height, weight):\n bmi = weight / ((height / 100) ** 2)\n\n if bmi < 18.5:\n bmiPrinter(round(bmi, 4), 'Underweight')\n\n elif bmi >= 18.5 and bmi < 25:\n bmiPrinter(round(bmi, 4), 'Healthy weight')\n\n elif bmi >= 25 and bmi < 30:\n bmiPrinter(round(bmi, 4), 'Overweight')\n\n elif bmi >= 30 and bmi < 35:\n bmiPrinter(round(bmi, 4), 'Moderately obese')\n\n elif bmi > 35:\n bmiPrinter(round(bmi, 4), 'Severely obese')\n\n\ndef bmiPrinter(bmi, msg):\n print('\\n[INFO]: Your BMI is', bmi, '\\n[SUGGESTION]:', msg)\n\n\ncomputeBMI(\n float(sys.argv[1] or input('\\nYour height(CM) \\n')),\n float(sys.argv[2] or input('\\nYour weight(KG) \\n'))\n)\n" }, { "alpha_fraction": 0.7317073345184326, "alphanum_fraction": 0.7317073345184326, "avg_line_length": 12.666666984558105, "blob_id": "f441be607ba36d808a54c67cd810d96a70d03eea", "content_id": "a59c121e5b1950a6321de50617c9a82e401285e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/README.md", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "# hello python\n\n> Learning how to python\n" }, { "alpha_fraction": 0.5834394693374634, "alphanum_fraction": 0.5949044823646545, "avg_line_length": 24.322580337524414, "blob_id": "1a99de410982971ea5a1bbe2cd7faebf474b51c9", "content_id": "f958e3b88a5c02e8126a5a2214b8430492fc262a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 785, "license_type": "no_license", "max_line_length": 78, "num_lines": 31, "path": "/menu.py", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n\ndef inputRangeNumber(prompt, validChoice=None):\n '''\n Usage: choice = inputRangeNumber('Please input a valid number [1 ~ 3]: ')\n '''\n if not(validChoice):\n # Set is mutable object, so we assign default value from if expression\n validChoice = {1, 2, 3}\n while True:\n try:\n choice = int(input(prompt))\n if choice in validChoice:\n break\n 
except ValueError:\n pass\n\n return choice\n\n\ndef displayMenu(options):\n for index in range(len(options)):\n print('{:d}. {:s}'.format(index+1, options[index]))\n\n choice = inputRangeNumber('Please choose a menu item [1 ~ 3]: \\n')\n\n return options[choice - 1]\n\n\nprint(displayMenu(['enter name', 'display greeting', 'quit']))\n" }, { "alpha_fraction": 0.5805991291999817, "alphanum_fraction": 0.6233951449394226, "avg_line_length": 25.961538314819336, "blob_id": "fdf29fbc1d94e59e3223b87ea57a3255925fd990", "content_id": "82579b61e96accb0d5e994b83f75d66b32cb0338", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 61, "num_lines": 26, "path": "/BMI.py", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n# Body mass index(BMI)\n# wiki: https://en.wikipedia.org/wiki/Body_mass_index\n\nprint('\\nBMI calculator, build with python v3.7.3\\n')\n\nheight = float(input('\\nYour height(CM) \\n'))\nweight = float(input('\\nYour weight(KG) \\n'))\n\nbmi = weight / ((height / 100) ** 2)\n\nif bmi < 18.5:\n print('\\nYour BMI is', round(bmi, 4), 'Underweight')\n\nelif bmi >= 18.5 and bmi < 25:\n print('\\nYour BMI is', round(bmi, 4), 'Healthy weight')\n\nelif bmi >= 25 and bmi < 30:\n print('\\nYour BMI is', round(bmi, 4), 'Overweight')\n\nelif bmi >= 30 and bmi < 35:\n print('\\nYour BMI is', round(bmi, 4), 'Moderately obese')\n\nelif bmi > 35:\n print('\\nYour BMI is', round(bmi, 4), 'Severely obese')\n" }, { "alpha_fraction": 0.6794840097427368, "alphanum_fraction": 0.687174379825592, "avg_line_length": 30.248062133789062, "blob_id": "2c12b59590bc30f628dd027a84b5ba5676ad32e8", "content_id": "e107d08462a46ddbd43a5c6012096ce81f360e42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4725, "license_type": "no_license", "max_line_length": 80, "num_lines": 129, "path": "/functions.py", "repo_name": "lbwa/hello-python", "src_encoding": "UTF-8", "text": "#!/bin/env python\n\n'''\nDefine functions\nDOC: https://docs.python.org/3/tutorial/controlflow.html#defining-functions\n\nWhat is the difference between arguments and parameters?\n实参 arguments 和形参 parameters 的区别?\n形参是在函数的定义中出现,定义了一个函数接受什么类型的参数;\n实参是调用函数时,实际传入的参数的值。\nhttps://docs.python.org/3/faq/programming.html#faq-argument-vs-parameter\ndef func(foo, bar=None, **kwargs):\n pass\nfoo, bar, kwargs 是形参。\nfunc(42, bar=314, extra=somevar)\n42, 314, somevar 是实参。\n'''\n\n\ndef multipleCaller(fn, title, total=5):\n if title:\n print('\\n==', '[Entire calling title]:', title, '==')\n\n print('\\n--Activate new function calling--\\n')\n\n for index in range(total):\n print('\\nCalling', index)\n print('Calling result:', fn())\n\n print('\\n--Function calling end--\\n')\n\n\nprint('''\n================================================================================\n默认实参 —— 可变默认实参(可变对象),不可变默认实参(不可变对象)\nDefault arguments\n1. Default argument with mutable object can be mutated when every calling\neg. list, dict, set\n\nNOTICE: Every function calling will change default arguments of those type !!\n\n2. Default argument with immutable object will be immutable\neg. 
str, int, tuple, bool\n\nNOTICE: Default arguments should be always immutable,\notherwise every function calling will mutate default arguments\n\nDOC:https://docs.python.org/3/tutorial/controlflow.html#default-argument-values\n================================================================================\n''')\n\n\n# Mutable object for default arguments\ndef mutableDefaultArguments(default=[]):\n # mutate arguments\n default.append('mutate')\n # Every calling has different default variable\n # (same object(memeroy address), but different value).\n return default\n\n\n# Immutable object for default arguments\ndef immutableDefaultArguments(default=1):\n # mutate arguments\n default = default + 2\n # Always return 3\n return default\n\n\nmultipleCaller(mutableDefaultArguments, 'Mutable object', 5)\nmultipleCaller(immutableDefaultArguments, 'Immutable object', 5)\n\nprint('''\n================================================================================\n关键字参数\n一个在函数调用中以标识符(例如name =)开头,或作为前面带有**的字典中的值传递的参数\nkeyword arguments\nan argument preceded by an identifier (e.g. name=) in a function call\nor passed as a value in a dictionary preceded by **\n\n位置参数\n一个不是关键字参数的参数。位置参数可出现在参数列表的开头和(或)作为一个在之前有 * 标记\n的可迭代元素传入。\npositional argument: an argument that is not a keyword argument. Positional\narguments can appear at the beginning of an argument list and/or be passed as\nelements of an iterable preceded by *. For example, 3 and 5 are both positional\narguments in the following calls:\n\ncomplex(3, 5)\ncomplex(*(3, 5))\n\nDOC: https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments\n\nIn a function call, keyword arguments must follow positional arguments.\nAll the keyword arguments passed must match one of the arguments accepted by\nthe function, and their order is not important. This also includes non-optional\narguments. No argument may receive a value more than once.\n\n在当次函数调用中,关键字参数 keyword argument 必须在位置参数 positional argument 之\n后。所有传入的关键字参数必须与函数所能接受的参数之一匹配,并且关键字参数的顺序是不重要\n的。这些规则同样适用于非可选参数。没有参数可接受一个值超过多次。\n================================================================================\n''')\n\n\n# 在 python 中 *pw 类似于 JS 中的函数 rest 参数,不同的是 python 组合的结果是 tuple\ndef keywordArguments(pa, some='Default argument', *pw, **kw):\n print('\\npa is', pa)\n print('some is', some)\n # Always be a tuple type\n print('pw is', pw)\n # Always be a dict type\n print('kw is', kw)\n\n\n# Variable `pa` is a required argument, also positional argument\nkeywordArguments('Required positional argument')\n\n# `some='Keyword argument'` is a keyword argument, also optional argument\n# 此时 some 为关键字参数\nkeywordArguments('Required positional argument', some='Keyword argument')\n\nkeywordArguments(\n 'Required positional argument',\n 'Positional argument 1', # 此时第二参数的传参形式为 位置参数,而非关键字参数\n 'Positional argument 2',\n 'Positional argument 3',\n kw='keyword argument'\n)\n" } ]
6
andredemori/Indexing-methods-applied-to-spatial-data
https://github.com/andredemori/Indexing-methods-applied-to-spatial-data
58006ff51d7adb935813e1bfbb751094eb63866a
60b0b8f7b6e092260a5c74de9476acfa6490e07f
77b26cb0bc6a38112165a2b6e2be098bc96bf0e3
refs/heads/master
2021-06-26T09:16:21.244534
2021-02-27T19:34:55
2021-02-27T19:34:55
212,236,708
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6152849793434143, "alphanum_fraction": 0.6285621523857117, "avg_line_length": 23.10569190979004, "blob_id": "807f6824489705166203dff63153a70c1371acc5", "content_id": "000d3eba12c7d0af71c37298da34716b57755dd7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3088, "license_type": "no_license", "max_line_length": 108, "num_lines": 123, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/controller/Application.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package treeCompare.controller;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\n\r\nimport treeCompare.data.Star;\r\nimport treeCompare.source.Read;\r\nimport treeCompare.tree.Tree;\r\nimport treeCompare.math.Statistics;\r\nimport java.util.concurrent.TimeUnit;\r\n\r\npublic class Application {\r\n\r\n\tprivate String datasetFile;\r\n\tprivate String pointsFile;\r\n\tprivate String distanceFile;\r\n\tprivate String output;\r\n\r\n\t/**\r\n\t * @param input\r\n\t * @param output\r\n\t * @param output2\r\n\t */\r\n\tpublic Application(String datasetFile, String pointsFile, String distanceFile, String output) {\r\n\t\tthis.datasetFile = datasetFile;\r\n\t\tthis.distanceFile = distanceFile;\r\n\t\tthis.output = output;\r\n\t\tthis.pointsFile = pointsFile;\r\n\t}\r\n\r\n\tpublic void execute() {\r\n\r\n\t\tRead read = new Read(datasetFile, \"UTF-8\", \",\");\r\n\t\tTree tree = new Tree();\r\n\t\tList<Star> stars = read.read();\r\n\r\n\t\tlong start_buildTree = System.currentTimeMillis();\r\n\t\tfor (Star star : stars) {\r\n\t\t\ttree.insert(star);\r\n\t\t}\r\n\t\tlong stop_buildTree = System.currentTimeMillis() - start_buildTree;\r\n\r\n\t\t//transformar buildtree para segundos\r\n\t\tString str = \"\" + stop_buildTree;\r\n\t\tfloat bt = Float.parseFloat(str);\r\n\t\tbt = bt/1000;\r\n\t\tSystem.out.println(String.format(\"BuildTree = %f\", bt));\r\n\r\n\r\n\t\tread = new Read(pointsFile, \"UTF-8\", \",\");\r\n\t\tList<Star> points = read.read();\r\n\r\n\t\tread = new Read(distanceFile, \"UTF-8\", \",\");\r\n\t\tList<Double> distance = read.readPoints();\r\n\r\n\r\n\r\n\t\tfor (Star star : points) {\r\n\t\t\t\tList<Star> n = tree.search(0.680596704877, star);\r\n\r\n\t\t\tSystem.out.println(\r\n\t\t\t\t\tString.format(\"Numbers of neighbors: %d\", n.size()));\r\n\t\t}\r\n\r\n\r\n\r\n\t\t/*List<Float> times = new ArrayList<>();\r\n\t\tfor (Star star : points) {\r\n\t\t\tfor (Double dist : distance) {\r\n\t\t\t\tlong start_find = System.currentTimeMillis();\r\n\t\t\t\ttree.search(dist, star);\r\n\t\t\t\tlong stop_find = System.currentTimeMillis();\r\n\r\n\t\t\t\tlong time = stop_find - start_find;\r\n\r\n\t\t\t\t//transformar tempo de mili para segundos\r\n\t\t\t\tString stri = \"\" + time;\r\n\t\t\t\tfloat to = Float.parseFloat(stri);\r\n\t\t\t\tto = to/1000;\r\n\t\t\t\t//System.out.printf(\"Time = %.11f\\n\", to);\r\n\r\n\t\t\t\ttimes.add(to);\r\n\r\n\t\t\t}\r\n\t\t\tStatistics statistics = new Statistics(times);\r\n\r\n\r\n\t\t\tSystem.out.println(\r\n\t\t\t\t\tString.format(\"FN : Point ID = %d, Mean = %.25f, var = %.25f\", star.getPointId(), statistics.getMean(),\r\n\t\t\t\t\t\t\tstatistics.getVariance())\r\n\t\t\t);\r\n\t\t}*/\r\n\r\n\t\t/*List<Float> times = new ArrayList<>();\r\n\t\tint count = 1;\r\n\t\tfor (Double dist : distance) {\r\n\t\t\tfor (Star star : points) {\r\n\t\t\t\tlong start_find = System.currentTimeMillis();\r\n\t\t\t\ttree.search(dist, star);\r\n\t\t\t\tlong 
stop_find = System.currentTimeMillis();\r\n\r\n\t\t\t\tlong time = stop_find - start_find;\r\n\r\n\t\t\t\t//transformar tempo para segundos\r\n\t\t\t\tString stri = \"\" + time;\r\n\t\t\t\tfloat to = Float.parseFloat(stri);\r\n\t\t\t\tto = to/1000;\r\n\t\t\t\t//System.out.printf(\"Time = %f\", to);\r\n\r\n\t\t\t\ttimes.add(to);\r\n\t\t\t}\r\n\t\t\tStatistics statistics = new Statistics(times);\r\n\r\n\t\t\tSystem.out.println(\r\n\t\t\t\t\tString.format(\"FN : Dist = %d, Mean = %.25f, var = %.25f\", count, statistics.getMean(),\r\n\t\t\t\t\tstatistics.getVariance())\r\n\t\t\t);\r\n\t\t\tcount += 1;\r\n\t\t}*/\r\n\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5434977412223816, "alphanum_fraction": 0.5461883544921875, "avg_line_length": 33.84375, "blob_id": "bb1841bb9bf392a6cd4ab7facb3be0c5bf78416b", "content_id": "1d67466cf934dc365a7e48a1525b56ef783e6331", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2230, "license_type": "no_license", "max_line_length": 96, "num_lines": 64, "path": "/quadtree/QuadtreeTime/quadtree.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from util import EucDist\n\n\nclass QuadTree(object):\n def __init__(self, root, level):\n self.root = root\n self.level = level\n self.nodes = [] # nodes at the last level of the src\n if level > 0:\n self.build_tree([root])\n else:\n self.nodes = [root]\n\n def getInitialLevel(self):\n return self.nodes\n\n def getRoot(self):\n return self.root\n\n def build_tree(self, nodes):\n for n in nodes:\n if n.getLevel() == self.level or n.getSize() < 3:\n self.nodes.append(n)\n else:\n self.build_tree(n.split_node())\n\n \"\"\"def find_neighbors(self, node_search, node, max_distance, neighbor_list):\n centroid = node_search.getCentroid()\n if node.visited is False:\n node_centroid = node.getCentroid()\n node_diagonal = node.voxel.getDiagonal() / 2\n dist = EucDist(centroid.Ra, node_centroid.Ra, centroid.Dec, node_centroid.Dec)\n\n if dist <= max_distance + node_diagonal:\n\n if len(node.children) > 0:\n for child in node.children:\n self.find_neighbors(node_search, child, max_distance, neighbor_list)\n else:\n if node.getCentroid().getId() != centroid.getId():\n neighbor_list.extend(node.elements)\n\n node.mark_visited()\n\n return neighbor_list\"\"\"\n\n def find_neighbors(self, node_search, node, max_distance, neighbor_list):\n if node.visited is False:\n node_centroid = node.getCentroid()\n node_diagonal = node.voxel.getDiagonal() / 2\n dist = EucDist(node_search.Ra, node_centroid.Ra, node_search.Dec, node_centroid.Dec)\n\n if dist <= max_distance + node_diagonal:\n\n if len(node.children) > 0:\n for child in node.children:\n self.find_neighbors(node_search, child, max_distance, neighbor_list)\n else:\n if node.getCentroid().getId() != node_search.getId():\n neighbor_list.extend(node.elements)\n\n node.mark_visited()\n\n return neighbor_list\n" }, { "alpha_fraction": 0.56230628490448, "alphanum_fraction": 0.5672659873962402, "avg_line_length": 29.433961868286133, "blob_id": "59031b4c2062a3b1d9ede204ee17d37e9288692f", "content_id": "cdfaae4290c9e1a567e377523685bb0cba7b3c00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1613, "license_type": "no_license", "max_line_length": 114, "num_lines": 53, "path": "/quadtree/QuadtreeTime/voxel.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from math import 
sqrt, pow\n\n\nclass Voxel(object):\n def __init__(self, x_left, x_right, y_left, y_right):\n self.x_left = x_left\n self.x_right = x_right\n self.y_left = y_left\n self.y_right = y_right\n self.centroid = self.computeCentroid()\n\n def getBottomLeft(self):\n return [self.x_left, self.y_left]\n\n def getUpRight(self):\n return [self.x_right, self.y_right]\n\n def setCentroidPos(self):\n self.centroid = self.computeCentroid()\n\n def getVoxelCentroid(self):\n return self.centroid\n\n def getHeightSize(self):\n return self.y_right - self.y_left\n\n def getSideSize(self):\n return self.x_right - self.x_left\n\n def isPointinVoxel(self, x, y):\n return self.x_left <= x <= self.x_right and self.y_left <= y <= self.y_right\n\n def getDiagonal(self):\n return sqrt(pow(self.x_right - self.x_left, 2) + pow(self.y_right - self.y_left, 2))\n\n def computeCentroid(self):\n x_left = self.getBottomLeft()\n x_l = x_left[0]\n y_l = x_left[1]\n x_right = self.getUpRight()\n x_r = x_right[0]\n y_r = x_right[1]\n cent_ra = ((x_r - x_l) / 2) + x_l\n cent_dec = ((y_r - y_l) / 2) + y_l\n centroid = [cent_ra, cent_dec]\n return centroid\n\n def __str__(self):\n return \"%s,%s,%s,%s,%s\" % (self.x_left, self.x_right, self.y_left, self.y_right, self.centroid)\n\n def __eq__(self, other):\n return self.x_left == other.x_left and self.x_right == other.x_right and self.y_left == other.y_left and \\\n self.y_right == other.y_right\n" }, { "alpha_fraction": 0.6004464030265808, "alphanum_fraction": 0.6015625, "avg_line_length": 19.33333396911621, "blob_id": "2fe9d7bf0e9bb76ef615d5261672bcdcf7ba00c3", "content_id": "862cebb258d9e67101512362b16c59d894d60e71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 896, "license_type": "no_license", "max_line_length": 97, "num_lines": 42, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/source/Write.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package treeCompare.source;\r\n\r\nimport java.io.File;\r\nimport java.io.PrintWriter;\r\nimport java.util.List;\r\nimport treeCompare.data.Star;\r\n\r\npublic class Write {\r\n\r\n\tprivate String filename;\r\n\r\n\t/**\r\n\t * @param filename\r\n\t * @param encoding\r\n\t */\r\n\tpublic Write(String filename) {\r\n\t\tthis.filename = filename;\r\n\t}\r\n\t\r\n\r\n\tpublic void write(Star star, List<Star> result) {\r\n\r\n\t\tFile file = new File(this.filename);\r\n\t\tPrintWriter writer = null;\r\n\r\n\t\ttry {\r\n\t\t\twriter = new PrintWriter(file);\r\n\t\t\twriter.write(String.format(\"%d, %f, %f, %f, %f, %f, %f, %f|\", star.getPointId(), star.getRa(),\r\n\t\t\t\t\tstar.getDec(), star.getU(), star.getG(), star.getR(), star.getI(), star.getZ()));\r\n\t\t\t\r\n\t\t\tfor (int i = 0; i < result.size(); i++) {\r\n\t\t\t\twriter.write(result.get(i).toString());\r\n\t\t\t}\r\n\r\n\t\t} catch (Exception e) {\r\n\t\t\tSystem.err.println(e.getMessage());\r\n\t\t} finally {\r\n\t\t\twriter.close();\r\n\t\t}\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.4864763915538788, "alphanum_fraction": 0.5247413516044617, "avg_line_length": 34.5, "blob_id": "e51089c18ee6101a52104c95a1612fc6324f2a46", "content_id": "5b2cd9162ff2e46419ffa6ee2bfeab716a4cdea7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8023, "license_type": "no_license", "max_line_length": 114, "num_lines": 226, "path": "/quadtree/QuadtreeTime/node.py", "repo_name": 
"andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from collections import deque\nfrom math import sqrt\n\nfrom util import EucDist\nfrom voxel import Voxel\n\n\nclass Node(object):\n # Structure:\n # voxel: The definition of the spatial region covered by this Node\n # centroid: the element (star) closest to the spatial centroid of the Node\n # parent: the parent node in the tree\n # level: the node level in the tree\n\n def __init__(self, voxel, centroid=None, parent=None, level=0):\n self.centroidStar = centroid\n self.voxel = voxel\n self.elements = deque()\n self.children = []\n self.size = 0\n self.parent = parent\n self.level = level\n self.visited = False\n\n def getElements(self):\n return self.elements\n\n def getChildren(self):\n return self.children\n\n def getVoxel(self):\n return self.voxel\n\n def getCentroid(self):\n return self.centroidStar\n\n def getLevel(self):\n return self.level\n\n def getSize(self):\n return self.size\n\n def addCentroid(self, centroid):\n self.centroidStar = centroid\n\n def addChildren(self, node):\n self.children.append(node)\n\n def addParent(self, node):\n self.parent = node\n\n def addElement(self, star):\n self.elements.append(star)\n\n def uniqueElement(self):\n unique = False\n if self.size == 1:\n unique = True\n return unique\n\n def setSize(self, size):\n self.size = size\n\n def isEmpty(self):\n if not self.elements:\n return True\n\n def mark_visited(self):\n if len(self.children) > 0:\n size = len(self.children)\n count = 0\n\n for n in self.children:\n if n.visited is True:\n count += 1\n\n if count == size:\n self.visited = True\n else:\n pass\n\n def split_node(self):\n level = self.level + 1\n\n voxel = self.getVoxel()\n centroid_voxel = voxel.getVoxelCentroid()\n\n voxel11 = Voxel(voxel.x_left, centroid_voxel[0], centroid_voxel[1], voxel.y_right)\n quarter11 = Node(voxel11, None, self, level)\n\n voxel12 = Voxel(centroid_voxel[0], voxel.x_right, centroid_voxel[1], voxel.y_right)\n quarter12 = Node(voxel12, None, self, level)\n\n voxel01 = Voxel(voxel.x_left, centroid_voxel[0], voxel.y_left, centroid_voxel[1])\n quarter01 = Node(voxel01, None, self, level)\n\n voxel02 = Voxel(centroid_voxel[0], voxel.x_right, voxel.y_left, centroid_voxel[1])\n quarter02 = Node(voxel02, None, self, level)\n\n star11_cent = star12_cent = star01_cent = star02_cent = float(\"inf\")\n star11_med_x = star11_med_y = star12_med_x = star12_med_y = star01_med_x = star01_med_y = star02_med_x = \\\n star02_med_y = 0\n tot_star11 = tot_star12 = tot_star01 = tot_star02 = 0\n border_tot_star11 = border_tot_star12 = border_tot_star01 = border_tot_star02 = 0\n\n for _ in range(len(self.getElements())):\n star = self.elements.popleft()\n if star.getRa() < centroid_voxel[0]:\n if star.getDec() < centroid_voxel[1]:\n quarter01.addElement(star)\n if star.border is False:\n star01_med_x += star.getRa()\n star01_med_y += star.getDec()\n tot_star01 += 1\n else:\n border_tot_star01 += 1\n else:\n quarter11.addElement(star)\n if star.border is False:\n star11_med_x += star.getRa()\n star11_med_y += star.getDec()\n tot_star11 += 1\n else:\n border_tot_star11 += 1\n else:\n if star.getDec() > centroid_voxel[1]:\n quarter12.addElement(star)\n if star.border is False:\n star12_med_x += star.getRa()\n star12_med_y += star.getDec()\n tot_star12 += 1\n else:\n border_tot_star12 += 1\n else:\n quarter02.addElement(star)\n if star.border is False:\n star02_med_x += star.getRa()\n star02_med_y += star.getDec()\n tot_star02 += 1\n else:\n 
border_tot_star02 += 1\n\n node11_cent = node12_cent = node01_cent = node02_cent = None\n nodes = []\n\n if tot_star11 > 0:\n star11_med_x = star11_med_x / tot_star11\n star11_med_y = star11_med_y / tot_star11\n for star in quarter11.getElements():\n dist = EucDist(star.getRa(), star11_med_x, star.getDec(), star11_med_y)\n if star.border is False and dist < star11_cent:\n node11_cent = star\n star11_cent = dist\n quarter11.centroidStar = node11_cent\n size = tot_star11 + border_tot_star11\n quarter11.setSize(size)\n self.addChildren(quarter11)\n nodes.append(quarter11)\n\n if tot_star12 > 0:\n star12_med_x = star12_med_x / tot_star12\n star12_med_y = star12_med_y / tot_star12\n for star in quarter12.getElements():\n dist = EucDist(star.getRa(), star12_med_x, star.getDec(), star12_med_y)\n if star.border is False and dist < star12_cent:\n node12_cent = star\n star12_cent = dist\n quarter12.centroidStar = node12_cent\n size = tot_star12 + border_tot_star12\n quarter12.setSize(size)\n self.addChildren(quarter12)\n nodes.append(quarter12)\n\n if tot_star01 > 0:\n star01_med_x = star01_med_x / tot_star01\n star01_med_y = star01_med_y / tot_star01\n for star in quarter01.getElements():\n dist = EucDist(star.getRa(), star01_med_x, star.getDec(), star01_med_y)\n if star.border is False and dist < star01_cent:\n node01_cent = star\n star01_cent = dist\n quarter01.centroidStar = node01_cent\n size = tot_star01 + border_tot_star01\n quarter01.setSize(size)\n self.addChildren(quarter01)\n nodes.append(quarter01)\n\n if tot_star02 > 0:\n star02_med_x = star02_med_x / tot_star02\n star02_med_y = star02_med_y / tot_star02\n for star in quarter02.getElements():\n dist = EucDist(star.getRa(), star02_med_x, star.getDec(), star02_med_y)\n if star.border is False and dist < star02_cent:\n node02_cent = star\n star02_cent = dist\n quarter02.centroidStar = node02_cent\n size = tot_star02 + border_tot_star02\n quarter02.setSize(size)\n self.addChildren(quarter02)\n nodes.append(quarter02)\n\n return nodes\n\n def partial_match(self, node_j, distance, epsilon):\n voxel_i = self.getVoxel()\n size_i = voxel_i.getSideSize()\n\n centroid_i = self.getCentroid()\n centroid_j = node_j.getCentroid()\n dist = EucDist(centroid_i.getRa(), centroid_j.Ra, centroid_i.getDec(), centroid_j.getDec())\n\n if size_i < distance:\n if distance + epsilon + (size_i * sqrt(2) >= dist >= distance - epsilon - (size_i * sqrt(2))):\n match = True\n else:\n match = False\n else:\n if (distance + epsilon) >= dist >= (distance - epsilon):\n match = True\n else:\n match = False\n return match\n\n def __str__(self):\n return \"%s;%s;%s;%s;%s;%s\" % (self.centroidStar, self.voxel, len(self.elements), len(self.children),\n self.size, self.level)\n" }, { "alpha_fraction": 0.5879629850387573, "alphanum_fraction": 0.6342592835426331, "avg_line_length": 25.75, "blob_id": "4ad07f7591a789cfa99f314e5f240f57b6c5c585", "content_id": "e40486fc01df13d4588ce3cb22aa50814fa9b2b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 216, "license_type": "no_license", "max_line_length": 162, "num_lines": 8, "path": "/PH-tree/treeCompare/out/artifacts/treeCompare_jar/30_rodadas_phtree.sh", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmax=30\n\nfor i in `seq 1 $max`\ndo\n java -cp treeCompare.jar treeCompare.Main ../../../data/Total0000008col.txt ../../../data/points.csv ../../../data/distances.csv erro >> rodada_{$i}_Phtree.txt\ndone\n\n\n" 
}, { "alpha_fraction": 0.5480362772941589, "alphanum_fraction": 0.5812689065933228, "avg_line_length": 34.9782600402832, "blob_id": "bb60181ce880b610594c61f9be55a528c86195a7", "content_id": "1607f3bac4b7188fd04e2b33bd6619842d7a4b3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1655, "license_type": "no_license", "max_line_length": 101, "num_lines": 46, "path": "/quadtree/QuadtreeTime/util.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from math import sqrt, pow, ceil, log\n\n\ndef EucDist(x1, x2, y1, y2):\n \"\"\"\n Distance Function: Euclidean\n\n This is already implemented by scipy.spatial.distance.euclidean(u, v)[source]\n http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.euclidean.html\n \"\"\"\n return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))\n\n\ndef compute_level(l1, l2, distance):\n \"\"\"\n\n :param l1:\n :param l2:\n :param distance:\n :return:\n \"\"\"\n return ceil(log(((l1 + l2) / (2 * distance)), 2) + 1)\n\n\ndef calculate_distance(voxel_node1, voxel_node2):\n point_of_voxel_node1 = [[voxel_node1.x_left, voxel_node1.y_left],\n [voxel_node1.x_right, voxel_node1.y_left],\n [voxel_node1.x_right, voxel_node1.y_right],\n [voxel_node1.x_left, voxel_node1.y_right]]\n\n point_of_voxel_node2 = [[voxel_node2.x_left, voxel_node2.y_left],\n [voxel_node2.x_right, voxel_node2.y_left],\n [voxel_node2.x_right, voxel_node2.y_right],\n [voxel_node2.x_left, voxel_node2.y_right]]\n\n min_distance = float(\"inf\")\n max_distance = float(\"-inf\")\n for data_node1 in point_of_voxel_node1:\n for data_node2 in point_of_voxel_node2:\n calculated_distance = EucDist(data_node1[0], data_node2[0], data_node1[1], data_node2[1])\n if calculated_distance < min_distance:\n min_distance = calculated_distance\n if calculated_distance > max_distance:\n max_distance = calculated_distance\n\n return min_distance, max_distance\n" }, { "alpha_fraction": 0.5422041416168213, "alphanum_fraction": 0.5564607381820679, "avg_line_length": 32.469696044921875, "blob_id": "ac9c9eb17f65a4d2da3c37e2b44dc34b6fd1fd2f", "content_id": "9d58c9f78ef2a96c7d6d29f809a17a086313154d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4419, "license_type": "no_license", "max_line_length": 113, "num_lines": 132, "path": "/quadtree/QuadtreeTime/application.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from sys import argv\n\nfrom node import Node\nfrom element import Element\nfrom quadtree import QuadTree\nfrom query import Query\nfrom util import EucDist, compute_level\nfrom voxel import Voxel\n\nimport numpy as np\n\nimport time\n\n\ndef build_tree(filename, query):\n start_time = time.time()\n\n root = None\n geometric_centroid_ra = geometric_centroid_dec = None\n centroid = None\n cent_min_dist = float(\"inf\")\n voxel = None\n\n with open(filename) as f:\n for line in f:\n split = line.replace(\"\\n\", \"\").split(\",\")\n if len(split) == 4:\n min_ra, max_ra, min_dec, max_dec = split\n voxel = Voxel(float(min_ra), float(max_ra), float(min_dec), float(max_dec))\n geometric_centroid_ra, geometric_centroid_dec = voxel.getVoxelCentroid()\n root = Node(voxel)\n elif line:\n border = False # if split[13].lower() == \"false\" else True\n\n star = Element(int(split[0]), float(split[1]), float(split[2]), float(split[3]), float(split[4]),\n float(split[5]), float(split[6]), 
float(split[7]), float(split[8]),\n float(split[9]), float(split[10]), float(split[11]), float(split[12]), 0, border)\n\n root.addElement(star)\n\n if star.border is False:\n dist = EucDist(star.getRa(), geometric_centroid_ra, star.getDec(), geometric_centroid_dec)\n if dist < cent_min_dist:\n centroid = star\n cent_min_dist = dist\n\n root.setSize(len(root.getElements()))\n root.addCentroid(centroid)\n\n level = compute_level(voxel.getSideSize(), voxel.getHeightSize(), query.getMaxDistance())\n tree = QuadTree(root, level)\n\n end_time = time.time() - start_time\n print(\"BT - %s - %0.25f\" % (filename, end_time))\n return tree\n\n\ndef produce_candidates(element, distance, tree):\n root = tree.root\n\n start_time = time.time()\n neighbors = tree.find_neighbors(element, root, distance, [])\n\n\n #print(len(neighbors))\n\n return neighbors\n\n\nif __name__ == '__main__':\n if len(argv) < 3:\n print(\"Pure Constellations Queries:\\n[1]InputFile\\n[2]OutputFile\\n[3]Neighbor Limit\")\n else:\n\n # Create query object instance\n query = Query.defineQuery(float(argv[3]))\n\n\n # Read Random points\n pointsList = []\n with open(\"/home/demori/points.csv\", \"r\") as f:\n for line in f:\n split = line.split(\",\")\n element = Element(int(split[0]), float(split[1]), float(split[2]), float(split[3]), 0,\n float(split[4]), 0, float(split[5]), 0, float(split[6]), 0,\n float(split[7]), 0, 0, False)\n pointsList.append(element)\n\n # Read Distance points\n distanceList = []\n with open(\"/home/demori/distances.csv\", \"r\") as f:\n for line in f:\n distanceList.append(float(line))\n\n\n\n tree = build_tree(argv[1], query)\n#objetos\n\n for element in pointsList:\n time_list = []\n for distance in distanceList:\n start_time = time.time()\n produce_candidates(element, distance, tree)\n end_time = time.time() - start_time\n\n time_list.append(end_time)\n\n array_time = np.array(time_list)\n mean = np.mean(array_time, dtype=np.float64)\n standard_deviation = np.var(array_time, dtype=np.float64)\n print(\"FN : Point ID = %d, Mean = %0.25f, std=%0.25f\" % (element.getId(), mean, standard_deviation))\n del time_list, array_time, mean, standard_deviation\n\n#distancias\n'''\n for distance in distanceList:\n time_list = []\n\n for element in pointsList:\n\n start_time = time.time()\n produce_candidates(element, distance, tree)\n end_time = time.time() - start_time\n\n time_list.append(end_time)\n\n array_time = np.array(time_list)\n mean = np.mean(array_time, dtype=np.float64)\n standard_deviation = np.var(array_time, dtype=np.float64)\n print(\"FN : Point ID = %d, Mean = %0.25f, std=%0.25f\" % (element.getId(), mean, standard_deviation))\n del time_list, array_time, mean, standard_deviation'''\n\n" }, { "alpha_fraction": 0.6521282196044922, "alphanum_fraction": 0.6573830842971802, "avg_line_length": 23.039474487304688, "blob_id": "20b052b07371f1c6aabb68a63ba21dcf43282efb", "content_id": "508aeb48a17645467e95b6ba99a2ac7f126a858d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1903, "license_type": "no_license", "max_line_length": 92, "num_lines": 76, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/source/Read.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package treeCompare.source;\r\n\r\nimport java.util.List;\r\nimport java.util.Scanner;\r\nimport java.io.File;\r\nimport java.io.FileNotFoundException;\r\nimport java.io.IOError;\r\nimport java.util.ArrayList;\r\nimport 
treeCompare.data.Star;\r\n\r\npublic class Read {\r\n\r\n\tprivate String filename;\r\n\tprivate String encoding;\r\n\tprivate String lineSplit;\r\n\r\n\t/**\r\n\t * \r\n\t * @param filename\r\n\t * @param encoding\r\n\t * @param lineSplit\r\n\t */\r\n\tpublic Read(String filename, String encoding, String lineSplit) {\r\n\t\tthis.filename = filename;\r\n\t\tthis.encoding = encoding;\r\n\t\tthis.lineSplit = lineSplit;\r\n\t}\r\n\r\n\tpublic List<Star> read() {\r\n\t\tList<Star> stars = new ArrayList<>();\r\n\r\n\t\tFile file = new File(this.filename);\r\n\t\tScanner scanner = null;\r\n\t\ttry {\r\n\t\t\tscanner = new Scanner(file, this.encoding);\r\n\t\t\twhile (scanner.hasNext()) {\r\n\t\t\t\tString line = scanner.next();\r\n\t\t\t\tString[] partOfLines = line.split(this.lineSplit);\r\n\t\t\t\tStar star = new Star(Long.parseLong(partOfLines[0]), Double.parseDouble(partOfLines[1]),\r\n\t\t\t\t\t\tDouble.parseDouble(partOfLines[2]), Float.parseFloat(partOfLines[3]),\r\n\t\t\t\t\t\tFloat.parseFloat(partOfLines[4]), Float.parseFloat(partOfLines[5]),\r\n\t\t\t\t\t\tFloat.parseFloat(partOfLines[6]), Float.parseFloat(partOfLines[7]), 0.0);\r\n\t\t\t\tstars.add(star);\r\n\t\t\t}\r\n\t\t} catch (IOError | FileNotFoundException e) {\r\n\t\t\tSystem.err.println(e.getMessage());\r\n\t\t} finally {\r\n\t\t\tscanner.close();\r\n\t\t}\r\n\r\n\t\treturn stars;\r\n\t}\r\n\r\n\t\r\n\tpublic List<Double> readPoints() {\r\n\t\tList<Double> points = new ArrayList<>();\r\n\r\n\t\tFile file = new File(this.filename);\r\n\t\tScanner scanner = null;\r\n\t\ttry {\r\n\t\t\tscanner = new Scanner(file, this.encoding);\r\n\t\t\twhile (scanner.hasNext()) {\r\n\t\t\t\tString line = scanner.next();\r\n\t\t\t\t\r\n\t\t\t\tpoints.add(Double.parseDouble(line));\r\n\t\t\t}\r\n\t\t} catch (IOError | FileNotFoundException e) {\r\n\t\t\tSystem.err.println(e.getMessage());\r\n\t\t} finally {\r\n\t\t\tscanner.close();\r\n\t\t}\r\n\r\n\t\treturn points;\r\n\t}\r\n\t\r\n}\r\n" }, { "alpha_fraction": 0.6807511448860168, "alphanum_fraction": 0.7323943376541138, "avg_line_length": 25.375, "blob_id": "87e374d645796b1016daa7921c578440ba0482a6", "content_id": "eabedbc80ae140404a4ef065dd253534e4273f67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 213, "license_type": "no_license", "max_line_length": 159, "num_lines": 8, "path": "/Slim-tree/sample/30_times_Slim-tree.sh", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmax=30\n\nfor i in `seq 1 $max`\ndo\n /home/demori/teste/arboretumR2/sample/sample-01_slimtree_2/Stars >>/home/demori/teste/arboretumR2/sample/sample-01_slimtree_2/logs/rodada_{$i}_Slim-tree.txt\ndone\n\n\n" }, { "alpha_fraction": 0.6233333349227905, "alphanum_fraction": 0.75, "avg_line_length": 19, "blob_id": "2876c95a5c098f8a17beb742735a037f0d35dacf", "content_id": "e117d1c108c4e8fdb17c6d4a352ab9bf23f146a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 300, "license_type": "no_license", "max_line_length": 110, "num_lines": 15, "path": "/quadtree/quadtreeBT_FN-time.sh", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nmax=30\n\nQUADTREE_CODE=./QuadtreeTime/application.py\n\nDATA=./Total000000.csv\n\nLOG_QUADTREE_CODE=./logs_objetos/QuadtreeTime_Total000000_executio\n\n\nfor i in `seq 1 $max`\ndo\n python $QUADTREE_CODE $DATA 1 0.0041666666666667 >> 
./logs_objetos/QuadtreeTime_Total00000_round_${i}.txt\ndone\n" }, { "alpha_fraction": 0.48126065731048584, "alphanum_fraction": 0.48126065731048584, "avg_line_length": 29.894737243652344, "blob_id": "d600c2ae537ab0b724426adc05875641fbb768d3", "content_id": "452a03698f4386cec5c9644cf0175a23b1bfdc1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1174, "license_type": "no_license", "max_line_length": 119, "num_lines": 38, "path": "/quadtree/QuadtreeTime/element.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "class Element(object):\n def __init__(self, pointId, ra, dec, u, u_err, g, g_err, r, r_err, i, i_err, z, z_err, distToCentroid, border):\n self.pointId = pointId\n self.Ra = ra\n self.Dec = dec\n self.u = u\n self.u_err = u_err\n self.g = g\n self.g_err = g_err\n self.r = r\n self.r_err = r_err\n self.i = i\n self.i_err = i_err\n self.z = z\n self.z_err = z_err\n self.distToCentroid = distToCentroid\n self.border = border\n\n def getId(self):\n return self.pointId\n\n def getRa(self):\n return self.Ra\n\n def getDec(self):\n return self.Dec\n\n def getDistanceTOC(self):\n return self.distToCentroid\n\n def __str__(self):\n return \"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\" % (self.pointId, self.Ra, self.Dec, self.u, self.u_err, self.g,\n self.g_err, self.r, self.r_err, self.i, self.i_err,\n self.z, self.z_err)\n\n def __eq__(self, other):\n if type(other) is Element:\n return self.pointId == other.pointId\n" }, { "alpha_fraction": 0.45905986428260803, "alphanum_fraction": 0.48605459928512573, "avg_line_length": 32.64812469482422, "blob_id": "bcd259231ff65e9a633dece55debe32413e33b78", "content_id": "e03bd1c2e2ec59c3cfc22809688a0dd390d25608", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 26784, "license_type": "no_license", "max_line_length": 160, "num_lines": 773, "path": "/Slim-tree/sample/app.cpp", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "//---------------------------------------------------------------------------\r\n// app.cpp - Implementation of the application.\r\n//\r\n// To change the behavior of this application, comment and uncomment lines at\r\n// TApp::Init() and TApp::Query().\r\n//\r\n// Authors: Marcos Rodrigues Vieira ([email protected])\r\n// André Muniz Demori ([email protected])\r\n// Copyright (c) 2003 GBDI-ICMC-USP\r\n//---------------------------------------------------------------------------\r\n#include <iostream>\r\n#include <string>\r\n#include <iomanip>\r\n#include <fstream>\r\n#include <math.h>\r\n#pragma hdrstop\r\n#include \"app.h\"\r\n//---------------------------------------------------------------------------\r\n#pragma package(smart_init)\r\n\r\n//---------------------------------------------------------------------------\r\n// Class TApp\r\n//------------------------------------------------------------------------------\r\nvoid TApp::CreateTree(){\r\n // create for Slim-Tree\r\n SlimTree = new mySlimTree(PageManager);\r\n}//end TApp::CreateTree\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::CreateDiskPageManager(){\r\n //for SlimTree\r\n PageManager = new stPlainDiskPageManager(\"SlimTree.dat\", 1024);\r\n}//end TApp::CreateDiskPageManager\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::Run(){\r\n\r\nstd::cout << 
std::fixed << std::setprecision(25);\r\n\r\n\r\n // Lets load the tree with a lot values from the file.\r\n //cout << \"\\n\\nAdding objects in the SlimTree\";\r\n\r\n clock_t x;\r\n int y;\r\n y = clock();\r\n\r\n LoadTree(DATASET);\r\n\r\n y = clock() - y;\r\n cout << \"\\nTotal tree creation time: \" <<((float)y)/CLOCKS_PER_SEC << \" segundos\";\r\n\r\n cout << \"\\n\\nLoading the query file\";\r\n LoadVectorFromFile(QUERYSTARFILE);\r\n\r\n if (queryObjects.size() > 0){\r\n // Do 500 queries.\r\n PerformQueries();\r\n }//end if\r\n // Hold the screen.\r\n cout << \"\\n\\nFinished the whole test!\";\r\n}//end TApp::Run\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::Done(){\r\n\r\n if (this->SlimTree != NULL){\r\n delete this->SlimTree;\r\n }//end if\r\n if (this->PageManager != NULL){\r\n delete this->PageManager;\r\n }//end if\r\n\r\n // delete the vetor of queries.\r\n for (unsigned int i = 0; i < queryObjects.size(); i++){\r\n delete (queryObjects.at(i));\r\n }//end for\r\n}//end TApp::Done\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::LoadTree(char * fileName){\r\n ifstream in(fileName);\r\n char starID[200];\r\n double dRa,dDec,dU,dG,dR,dI,dZ;\r\n long w = 0;\r\n TStar * star;\r\n\r\n if (SlimTree!=NULL){\r\n\r\n if (in.is_open()){\r\n //cout << \"\\nLoading objects \";\r\n while(in.getline(starID, 200, '\\t')){\r\n in >> dRa;\r\n in >> dDec;\r\n in >> dU;\r\n in >> dG;\r\n in >> dR;\r\n in >> dI;\r\n in >> dZ;\r\n in.ignore();\r\n star = new TStar(starID, dRa, dDec, dU, dG, dR, dI, dZ);\r\n SlimTree->Add(star);\r\n delete star;\r\n w++;\r\n if (w % 10 == 0){\r\n //cout << '.';\r\n }//end if*/\r\n }//end while\r\n //cout << \" Added \" << SlimTree->GetNumberOfObjects() << \" objects \";\r\n in.close();\r\n }else{\r\n cout << \"\\nProblem to open the file.\";\r\n }//end if\r\n\r\n }else{\r\n cout << \"\\n Zero object added!!\";\r\n }//end if\r\n\r\n}//end TApp::LoadTree\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::LoadVectorFromFile(char * fileName){\r\n ifstream in(fileName);\r\n char starID[200];\r\n int cont;\r\n double dRa, dDec, dU, dG, dR, dI, dZ;\r\n\r\n // clear before using.\r\n queryObjects.clear();\r\n\r\n if (in.is_open()){\r\n cout << \"\\nLoading query objects \";\r\n cont = 0;\r\n while(in.getline(starID, 200, '\\t')){\r\n in >> dRa;\r\n in >> dDec;\r\n in >> dU;\r\n in >> dG;\r\n in >> dR;\r\n in >> dI;\r\n in >> dZ;\r\n in.ignore();\r\n this->queryObjects.insert(queryObjects.end(), new TStar(starID, dRa, dDec, dU, dG, dR, dI, dZ));\r\n cont++;\r\n }//end while\r\n cout << \" Added \" << queryObjects.size() << \" query objects \";\r\n in.close();\r\n }else{\r\n cout << \"\\nProblem to open the query file.\";\r\n cout << \"\\n Zero object added!!\\n\";\r\n }//end if\r\n}//end TApp::LoadVectorFromFile\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::PerformQueries(){\r\n if (SlimTree){\r\n cout << \"\\nStarting Statistics for Range Query with SlimTree.... \";\r\n PerformRangeQuery();\r\n cout << \" Ok\\n\";\r\n\r\n // cout << \"\\nStarting Statistics for Nearest Query with SlimTree.... 
\";\r\n // PerformNearestQuery();\r\n // cout << \" Ok\\n\";\r\n }//end if\r\n}//end TApp::PerformQuery\r\n\r\n//------------------------------------------------------------------------------\r\n /*----------------\r\n\r\nvoid TApp::PerformRangeQuery(){\r\n\r\n myResult * result;\r\n stDistance radius;\r\n clock_t start, end;\r\n unsigned int size;\r\n unsigned int i;\r\n long double time_1;\r\n long double time_2;\r\n long double time_3;\r\n long double time_4;\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 0.267626657835);\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_0267626657835.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_0267626657835();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_0267626657835();\r\n\r\n\r\n\r\n /*\r\n //tempo de cada um / qtd de objetos\r\n\r\n if(i == 0)\r\n {\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n cout <<( (double)time_1) / size << \"(s)\\n\";\r\n }else if(i == 1){\r\n time_2 = ((double )end-(double )start_2) / 1000000.0;\r\n time_4 = ((double)time_2);\r\n time_3 = ((double )time_2-(double )time_1) / size;\r\n cout << ( (double)time_3) << \"(s)\\n\";\r\n }else{\r\n time_2 = ((double )end-(double )start_2) / 1000000.0;\r\n time_3 = ((double )time_2-(double )time_4) / size;\r\n time_4 = ((double)time_2);\r\n std::cout << std::fixed << std::setprecision(15);\r\n cout << ( (double)time_3) << \"(s)\\n\";\r\n }\r\n\r\n end = clock();\r\n */\r\n\r\n/*--------------------\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_0.267626657835.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout << std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size << \"(s)\";\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size << \"(s)\";\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << 
\"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n }//end if\r\n\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 0.66137703749);\r\n\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_066137703749.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_066137703749();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_066137703749();\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_0.66137703749.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout << std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size;\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size;\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n\r\n }//end if\r\n\r\n\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 0.799373869089);\r\n\r\n\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n 
ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_0799373869089.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_0799373869089();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_0799373869089();\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_0.799373869089.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout << std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n\r\n\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size;\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size;\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n\r\n }//end if\r\n\r\n\r\n\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 0.624763269697);\r\n\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_0624763269697.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_0624763269697();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_0624763269697();\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_0.624763269697.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout 
<< std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size;\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size;\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n\r\n }//end if\r\n\r\n\r\n\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 1.0);\r\n\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_010.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_010();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_10();\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_1.0.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout << std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size;\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size;\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << 
\"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n\r\n }//end if\r\n\r\n\r\n\r\n\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n clock_t start_2, end;\r\n start_2 = clock();\r\n for (i = 0; i < size; i++) //size = 9999\r\n {\r\n result = SlimTree->RangeQuery(queryObjects[i], 0.763196789371);\r\n\r\n\r\n //Ra and Dec from object\r\n //cout << \"\\n\\nObject: \\n\";\r\n //cout << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \"<< queryObjects[i]->GetDec() << \"\\n\\n\";\r\n\r\n\r\n //escreve em disco o Ra e Dec do objeto e dos vizinhos dele\r\n ofstream arquivo_1;\r\n arquivo_1.open(\"objetos_e_vizinhos/objetos_e_vizinhos_0763196789371.txt\",ios::app);\r\n arquivo_1 << \"\\n\\nObject: \\n\" << \"RA: \" << queryObjects[i]->GetRa() << \"\\tDec: \" << queryObjects[i]->GetDec() << \"\\n\\n\" << \"Neighbors (Ra,Dec): \\n\";\r\n arquivo_1.close();\r\n result->GetRa_Dec_Neighbors_RangeQuery_0763196789371();\r\n\r\n\r\n //Ra and Dec from neighbors\r\n //cout << \"Neighbors (Ra,Dec): \\n\";\r\n // cout << result->GetRa_Dec_Neighbors_RangeQuery() << \"\\n\\n\";\r\n\r\n\r\n //distancias\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n //cout << \"\\nDistances from neighbors:\\n\" << result->GetDistancesRangeQuery();\r\n result->GetDistancesRangeQuery_0763196789371();\r\n\r\n\r\n //tempo\r\n ofstream arquivo;\r\n arquivo.open(\"soma_dos_tempos/soma_dos_tempos_0.763196789371.txt\",ios::app);\r\n // soma do tempo de cada um\r\n std::cout << std::fixed << std::setprecision(15);\r\n //cout << \"\\nTotal Time: \" << ((double )end-(double )start_2) / 1000000.0 << \"(s)\\n\";\r\n end = clock();\r\n time_1 = ((double )end-(double )start_2) / 1000000.0;\r\n arquivo << time_1 << \"\\n\";\r\n arquivo.close();\r\n\r\n delete result;\r\n }//end for\r\n\r\n end = clock();\r\n //cout << \"\\nRadius: \" << result->GetRadius();\r\n\r\n //cout << \"\\nDistances: \\n\" << result->GetDistancesRangeQuery(); //pega o ultimo\r\n\r\n cout << \"\\n\\nRadius: \" << result->GetRadius();\r\n\r\n cout << \"\\nAverage: \" << time_1/size;\r\n\r\n cout << \"\\nVariance: \" << (time_1 - (time_1/size))/size;\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n\r\n\r\n }//end if\r\n*/\r\n\r\n//pega as 100 distancias geradas aleatóriamente\r\n\r\nvoid TApp::PerformRangeQuery(){\r\n\r\nstd::cout << std::fixed << std::setprecision(25);\r\n\r\n double v[100];\r\n\r\n for(int p = 0 ; p <= 99 ; p++)\r\n {\r\n\r\n ifstream arquivo_distancias;\r\n string linha;\r\n\r\n arquivo_distancias.open(\"/home/demori/teste/arboretumR2/sample/sample-01_slimtree_2/datasets/Gerador/TestDist.csv\");\r\n\r\n if(arquivo_distancias.is_open())\r\n {\r\n while(getline(arquivo_distancias,linha))\r\n {\r\n double linha_double = atof(linha.c_str());\r\n\r\n v[p] = linha_double;\r\n }\r\n }\r\n }\r\n\r\n myResult * result;\r\n stDistance radius;\r\n unsigned int size;\r\n unsigned int i;\r\n long double time_1;\r\n long double time_2;\r\n float temp[10000];\r\n 
unsigned int contador = 0;\r\n float soma = 0;\r\n\r\n\r\n if (SlimTree)\r\n {\r\n size = queryObjects.size();\r\n // reset the statistics\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n\r\n\r\n for(int k = 0 ; k <= 99 ; k++)\r\n {\r\n contador = 0;\r\n\r\n for (i = 0; i < size; i++)\r\n {\r\n\r\n clock_t t;\r\n int f;\r\n t = clock();\r\n\r\n result = SlimTree->RangeQuery(queryObjects[k], v[i]);\r\n\r\n t = clock() - t;\r\n temp[contador] = ((float)t)/CLOCKS_PER_SEC;\r\n contador = contador + 1;\r\n\r\n delete result;\r\n }//end for\r\n\r\n float somador = temp[0];\r\n float media = 0;\r\n\r\n for(int u = 0 ; u < 99 ; u++)\r\n {\r\n somador = somador + temp[u+1];\r\n }\r\n\r\n media = somador / size;\r\n cout << \"\\n\\nObjeto: \" << queryObjects[k]->GetID() << \"\\n\";\r\n cout << \"Total time: \" << somador << \"\\n\";\r\n cout << \"Average: \" << media << \"\\n\";\r\n cout << \"Variance: \" << (somador - media)/size << \"\\n\";\r\n cout << \"Standart deviation: \" << sqrt((somador - media)/size);\r\n cout << \"\\n\\n\";\r\n\r\n\r\n }//end for\r\n\r\n }//end if\r\n\r\n}//end TApp::PerformRangeQuery\r\n\r\n//------------------------------------------------------------------------------\r\nvoid TApp::PerformNearestQuery(){\r\n\r\n myResult * result;\r\n clock_t start, end;\r\n unsigned int size;\r\n unsigned int i;\r\n\r\n if (SlimTree){\r\n size = queryObjects.size();\r\n PageManager->ResetStatistics();\r\n SlimTree->GetMetricEvaluator()->ResetStatistics();\r\n start = clock();\r\n for (i = 0; i < size; i++){\r\n result = SlimTree->NearestQuery(queryObjects[i], 15);\r\n cout << \"\\nK: \" << result->GetK();\r\n cout << \"\\nDistances: \\n\" << result->GetDistancesNearestQuery();\r\n\r\n delete result;\r\n }//end for\r\n end = clock();\r\n\r\n cout << \"\\nTotal Time: \" << ((double )end-(double )start) / 1000.0 << \"(s)\";\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Disk Accesses: \" << (double )PageManager->GetReadCount() / (double )size;\r\n // is divided for queryObjects to get the everage\r\n cout << \"\\nAvg Distance Calculations: \" <<\r\n (double )SlimTree->GetMetricEvaluator()->GetDistanceCount() / (double )size;\r\n }//end if\r\n}//end TApp::PerformNearestQuery\r\n" }, { "alpha_fraction": 0.7108433842658997, "alphanum_fraction": 0.7108433842658997, "avg_line_length": 12.833333015441895, "blob_id": "c52be0afdfd5959ecb06c528ec32795403fc583d", "content_id": "73a3384fec72cbcba221c84b1721d91e0ea52870", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 83, "license_type": "no_license", "max_line_length": 24, "num_lines": 6, "path": "/Slim-tree/sample/Slim-tree_Stars.sh", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "#!/bin/bash\ngcc -fpermissive app.cpp\ngcc -c star.cpp\ngcc -c Stars.cpp\nmake\n./Stars\n" }, { "alpha_fraction": 0.6514598727226257, "alphanum_fraction": 0.6678832173347473, "avg_line_length": 22.909090042114258, "blob_id": "1c097e9f2a2855abd289a5a86f9177ca0ae22641", "content_id": "ab4130faa1c69f1acdaef3327f8a8502f9d6efc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 548, "license_type": "no_license", "max_line_length": 99, "num_lines": 22, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/Main.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package 
treeCompare;\r\n\r\nimport treeCompare.controller.Application;\r\n\r\npublic class Main {\r\n\r\n\tpublic static void main(String[] args) {\r\n\t\tif (args.length < 4) {\r\n\t\t\tSystem.out.println(\"PhTree:\\n\\t[1]DatasetFile\\n\\t[2]PointFile\\n\\t[3]DistanceFile\\n\\t[4]Output\");\r\n\t\t} else {\r\n\t\t\tString datasetFile = args[0];\r\n\t\t\tString pointsFile = args[1];\r\n\t\t\tString distanceFile = args[2];\r\n\t\t\tString output = args[3];\r\n\t\t\t\r\n\t\t\tApplication application = new Application(datasetFile, pointsFile, distanceFile, output);\r\n\t\t\tapplication.execute();\r\n\t\t}\r\n\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5215417146682739, "alphanum_fraction": 0.5277240872383118, "avg_line_length": 27.114845275878906, "blob_id": "9c8557e4bfaf202ed04214feb4e6e4dd535dab32", "content_id": "f95b4e2d7ead0cb77a36d7ae41847301feaaac46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10352, "license_type": "no_license", "max_line_length": 102, "num_lines": 357, "path": "/Slim-tree/sample/star.h", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "//---------------------------------------------------------------------------\r\n// city.h - Implementation of the User Layer\r\n//\r\n// This file implements the 2 classes required by the SlimTree Library User\r\n// Layer.\r\n//\r\n// TCity is the object which will be indexed by a metric tree. It abstracts a\r\n// city. Each city has a name, a latitude and a longitude. TCity defines an\r\n// interface to manipulate its information and also implements the stObject\r\n// interface.\r\n//\r\n// TCityDistanceEvaluator implements the stMetricEvaluator interface. It will\r\n// mesure the distance between 2 TCity instances.\r\n//\r\n// With these classes, it is possible to define and instantiate any metric tree\r\n// defined by the SlimTree Library.\r\n//\r\n// Authors: Marcos Rodrigues Vieira ([email protected])\r\n// Fabio Jun Takada Chino ([email protected])\n// André Muniz Demori ([email protected])\r\n// Copyright (c) 2002 GBDI-ICMC-USP\r\n//---------------------------------------------------------------------------\r\n#ifndef starH\r\n#define starH\r\n\r\n#include <math.h>\r\n#include <string>\r\n#include <time.h>\r\n#include <ostream>\r\nusing namespace std;\r\n\r\n// Metric Tree includes\r\n#include </home/demori/teste/arboretumR2/include/arboretum/stUserLayerUtil.h>\r\n#include </home/demori/teste/arboretumR2/include/arboretum/stTypes.h>\r\n#include </home/demori/teste/arboretumR2/include/arboretum/stUtil.h>\r\n\r\n//---------------------------------------------------------------------------\r\n// Class TStar\r\n//---------------------------------------------------------------------------\r\n/**\r\n* This class abstracts a star in a map. Each star has an id, ra, dec, u, g, r, i, z.\r\n*\r\n* <P>In addition to data manipulation methods (such as Getdec(), GetobjID()\r\n* and others), this class implements the stObject interface. 
This interface\r\n* qualifies this object to be indexed by a metric tree implemented by GBDI\r\n* SlimTree Library.\r\n*\r\n* <P>This interface requires no inheritance (because of the use of class\r\n* templates in the Structure Layer) but requires the following methods:\r\n* - TStar() - A default constructor.\r\n* - Clone() - Creates a clone of this object.\r\n* - IsEqual() - Checks if this instance is equal to another.\r\n* - GetSerializedSize() - Gets the size of the serialized version of this object.\r\n* - Serialize() - Gets the serialzied version of this object.\r\n* - Unserialize() - Restores a serialzied object.\r\n*\r\n* <P>Since the array which contains the serialized version of the object must be\r\n* created and destroyed by each object instance, this class will hold this array\r\n* as a buffer of the serialized version of this instance. This buffer will be\r\n* created only if required and will be invalidated every time the object changes\r\n* its values.\r\n*\r\n* <P>The serialized version of the object will be created as follows:<BR>\r\n* <CODE>\r\n* +----------+-----------+--------+<BR>\r\n* | Latitude | Longitude | Name[] |<BR>\r\n* +----------+-----------+--------+<BR>\r\n* </CODE>\r\n*\r\n* <P>Latitude and Logitude are stored as doubles (2 64-bit IEEE floating point\r\n* value) and Name[] is an array of chars with no terminator. Since Name[] has\r\n* a variable size (associated with the name of the city), the serialized form\r\n* will also have a variable number of bytes.\r\n*\r\n* @version 1.0\r\n* @author Fabio Jun Takada Chino\r\n*/\r\nclass TStar{\r\n public:\r\n /**\r\n * Default constructor. It creates a city with no name and longitude and\r\n * latitude set to 0. This constructor is required by stObject interface.\r\n */\r\n TStar(){\r\n ID = \"\";\r\n Ra = 0;\r\n Dec = 0;\n U = 0;\n G = 0;\n R = 0;\n I = 0;\n Z = 0;\r\n\r\n // Invalidate Serialized buffer.\r\n Serialized = NULL;\r\n }//end TStar\r\n\r\n /**\r\n * Creates a new star.\r\n *\r\n * @param iD Id of the object.\r\n * @param ra\r\n * @param dec\n * @param u\n * @param g\n * @param r\n * @param i\n * @param z\r\n */\r\n TStar(const string iD, double ra, double dec, double u, double g, double r, double i, double z){\n ID = iD;\r\n Ra = ra;\r\n Dec = dec;\n U = u;\n G = g;\n R = r;\n I = i;\n Z = z;\r\n\r\n // Invalidate Serialized buffer.\r\n Serialized = NULL;\r\n }//end TStar\r\n\r\n /**\r\n * Destroys this instance and releases all associated resources.\r\n */\r\n ~TStar(){\r\n\r\n // Does Serialized exist ?\r\n if (Serialized != NULL){\r\n // Yes! Dispose it!\r\n delete [] Serialized;\r\n }//end if\r\n }//end TStar\n\n /**\r\n * Gets Z.\r\n */\r\n double GetZ(){\r\n return Z;\r\n }//end Getz\n\n /**\r\n * Gets Z.\r\n */\r\n double GetI(){\r\n return I;\r\n }//end Geti\n\n /**\r\n * Gets R.\r\n */\r\n double GetR(){\r\n return R;\r\n }//end Getr\n\n /**\r\n * Gets G.\r\n */\r\n double GetG(){\r\n return G;\r\n }//end Getg\n\n /**\r\n * Gets U.\r\n */\r\n double GetU(){\r\n return U;\r\n }//end Getu\r\n\r\n /**\r\n * Gets Dec.\r\n */\r\n double GetDec(){\r\n return Dec;\r\n }//end Getdec\r\n\r\n /**\r\n * Gets Ra.\r\n */\r\n double GetRa(){\r\n return Ra;\r\n }//end Getra\r\n\r\n /**\r\n * Gets the ID of the star.\r\n */\r\n const string & GetID(){\r\n return ID;\r\n }//end GetID\r\n\r\n // The following methods are required by the stObject interface.\r\n /**\r\n * Creates a perfect clone of this object. 
This method is required by\r\n * stObject interface.\r\n *\r\n * @return A new instance of TStar wich is a perfect clone of the original\r\n * instance.\r\n */\r\n TStar * Clone(){\r\n return new TStar(ID, Ra, Dec, U, G, R, I, Z);\r\n }//end Clone\r\n\r\n /**\r\n * Checks to see if this object is equal to other. This method is required\r\n * by stObject interface.\r\n *\r\n * @param obj Another instance of TStar.\r\n * @return True if they are equal or false otherwise.\r\n */\r\n bool IsEqual(TStar *obj){\r\n\r\n return (Ra == obj->GetRa()) &&\r\n (Dec == obj->GetDec()) &&\n (U == obj->GetU()) &&\n (G == obj->GetG()) &&\n (R == obj->GetR()) &&\n (I == obj->GetI()) &&\n (Z == obj->GetZ());\r\n }//end IsEqual\r\n\r\n /**\r\n * Returns the size of the serialized version of this object in bytes.\r\n * This method is required by stObject interface.\r\n */\r\n stSize GetSerializedSize(){\r\n\r\n return (sizeof(double) * 7) + ID.length();\r\n }//end GetSerializedSize\r\n\r\n /**\r\n * Returns the serialized version of this object.\r\n * This method is required by stObject interface.\r\n *\r\n * @warning If you don't know how to serialize an object, this method may\r\n * be a good example.\r\n */\r\n const stByte * Serialize();\r\n\r\n /**\r\n * Rebuilds a serialized object.\r\n * This method is required by stObject interface.\r\n *\r\n * @param data The serialized object.\r\n * @param datasize The size of the serialized object in bytes.\r\n * @warning If you don't know how to serialize an object, this methos may\r\n * be a good example.\r\n */\r\n void Unserialize (const stByte *data, stSize datasize);\r\n private:\r\n /**\r\n * The ID of the star.\r\n */\r\n string ID;\r\n\r\n /**\r\n * Star's ra.\r\n */\r\n double Ra;\r\n\r\n /**\r\n * Star's dec.\r\n */\r\n double Dec;\n\n /**\r\n * Star's u.\r\n */\r\n double U;\n\n /**\r\n * Star's g.\r\n */\r\n double G;\n\n /**\r\n * Star's r.\r\n */\r\n double R;\n\n /**\r\n * Star's i.\r\n */\r\n double I;\n\n /**\r\n * Star's z.\r\n */\r\n double Z;\r\n\r\n /**\r\n * Serialized version. If NULL, the serialized version is not created.\r\n */\r\n stByte * Serialized;\r\n};//end TMapPoint\r\n\r\n//---------------------------------------------------------------------------\r\n// Class TCityDistanceEvaluator\r\n//---------------------------------------------------------------------------\r\n/**\r\n* This class implements a metric evaluator for TStar instances. It calculates\r\n* the distance between stars by performing a euclidean distance between star\r\n* coordinates (I know it is not accurate but it is only a sample!!!).\r\n*\r\n* <P>It implements the stMetricEvaluator interface. As stObject interface, the\r\n* stMetricEvaluator interface requires no inheritance and defines 2 methods:\r\n* - GetDistance() - Calculates the distance between 2 objects.\r\n* - GetDistance2() - Calculates the distance between 2 objects raised by 2.\r\n*\r\n* <P>Both methods are defined due to optmization reasons. Since euclidean\r\n* distance raised by 2 is easier to calculate, It will implement GetDistance2()\r\n* and use it to calculate GetDistance() result.\r\n*\r\n* @version 1.0\r\n* @author Fabio Jun Takada Chino\r\n*/\r\nclass TStarDistanceEvaluator : public stMetricEvaluatorStatistics{\r\n public:\r\n /**\r\n * Returns the distance between 2 stars. 
This method is required by\r\n * stMetricEvaluator interface.\r\n *\r\n * @param obj1 Object 1.\r\n * @param obj2 Object 2.\r\n */\r\n stDistance GetDistance(TStar *obj1, TStar *obj2){\r\n return sqrt(GetDistance2(obj1, obj2));\r\n }//end GetDistance\r\n\r\n /**\r\n * Returns the distance between 2 stars raised by the power of 2.\r\n * This method is required by stMetricEvaluator interface.\r\n *\r\n * @param obj1 Object 1.\r\n * @param obj2 Object 2.\r\n */\r\n stDistance GetDistance2(TStar *obj1, TStar *obj2){\r\n double delta1, delta2;\r\n\r\n UpdateDistanceCount(); // Update Statistics\r\n\r\n delta1 = obj1->GetDec() - obj2->GetDec();\r\n delta2 = obj1->GetRa() - obj2->GetRa();\r\n return (delta1 * delta1) + (delta2 * delta2);\r\n }//end GetDistance2\r\n\r\n};//end TStarDistanceEvaluator\r\n\r\n//---------------------------------------------------------------------------\r\n// Output operator\r\n//---------------------------------------------------------------------------\r\n/**\r\n* This operator will write a string representation of a city to an outputstream.\r\n*/\r\nostream & operator << (ostream & out, TStar & star);\r\n\r\n#endif //end myobjectH\r\n" }, { "alpha_fraction": 0.7176470756530762, "alphanum_fraction": 0.7176470756530762, "avg_line_length": 11.142857551574707, "blob_id": "7d1e02d51cd795252f23e299698e31d0aa27d02e", "content_id": "4d9969a3c700b7f5281471d5ea0d12b555afff2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 85, "license_type": "no_license", "max_line_length": 27, "num_lines": 7, "path": "/Slim-tree/sample/readme.txt", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "executar:\n\ngcc -fpermissive -c app.cpp\ngcc -c star.cpp\ngcc -c Stars.cpp\nmake\n./Stars\n" }, { "alpha_fraction": 0.5201088786125183, "alphanum_fraction": 0.5241910815238953, "avg_line_length": 31.581281661987305, "blob_id": "327ad7545b9a2b84858ceb9cce19940debfb6288", "content_id": "320bc14a9e817e8a045015d63aed212e86380bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6614, "license_type": "no_license", "max_line_length": 118, "num_lines": 203, "path": "/quadtree/QuadtreeTime/executionengine.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from src.util import EucDist\n\n\nclass Tuple(object):\n def __init__(self, stars=[]):\n self.metadata = []\n self.stars = []\n\n def addTuple(self, tuple):\n self.stars.extend(tuple.getStars())\n self.metadata.extend(tuple.getMetadata())\n\n def addStar(self, s, rel):\n self.stars.append(s)\n self.metadata.append(rel)\n\n def getMetadataPos(self, index):\n return self.metadata[index]\n\n def getStar(self, index):\n return self.stars[index]\n\n def getStars(self):\n return self.stars\n\n def getMetadata(self):\n return self.metadata\n\n\nclass Filter(object):\n def __init__(self, producer=None, query=None):\n self.producer = producer\n self.type = \"filter\"\n self.query = query\n self.distM = query.getDistanceMatrix()\n self.anchorElement = self.query.getAnchor().getId()\n self.margin = query.getMargin()\n\n def getNext(self):\n\n listTuple = self.producer.getNext()\n\n if listTuple == \"end\":\n return listTuple\n elif not listTuple:\n return listTuple\n else:\n listResult = []\n epsilon = self.query.getEpsilon() * self.query.getMaxDistance()\n for t in listTuple:\n result = self.checkScale(t, self.margin, epsilon)\n if result:\n 
listResult.append(t)\n\n if len(listResult) == 0:\n return \"end\"\n else:\n return listResult\n\n def checkScale(self, candidatesolution, margin=0, epsilon=0):\n self.starsTuple = candidatesolution.getStars()\n self.metadataTuple = candidatesolution.getMetadata()\n self.margin = margin\n\n k = 0\n for i in range(len(self.starsTuple)):\n rai = self.starsTuple[i].getRa()\n deci = self.starsTuple[i].getDec()\n if i == 0:\n reli = self.anchorElement\n else:\n reli = self.metadataTuple[i]\n for j in range(i + 1, len(self.starsTuple)):\n raj = self.starsTuple[j].getRa()\n decj = self.starsTuple[j].getDec()\n relj = self.metadataTuple[j]\n dist = EucDist(rai, raj, deci, decj)\n if k == 0:\n k = dist / self.distM[reli][relj]\n else:\n ratio = dist / self.distM[reli][relj]\n if not (round(ratio) <= round(k) + epsilon):\n if not (round(ratio) >= round(k) - epsilon):\n return False\n return True\n\n\nclass Join(object):\n def __init__(self, rightRelation, leftJoin=None, leftRelation=None, query=None):\n self.rightR = rightRelation\n self.idR = self.rightR.getRelationId()\n self.type = \"join\"\n self.leftJ = leftJoin\n self.leftRelation = leftRelation\n self.query = query\n self.distM = query.getDistanceMatrix()\n if self.leftJ is None:\n self.stars = self.leftRelation.getStars()\n self.starIndex = 0\n self.sizeLeftleave = len(self.stars)\n\n def getNext(self):\n epsilon = self.query.getEpsilon() * self.query.getMaxDistance()\n listTuple = []\n newListTuple = []\n if self.leftRelation is None:\n listTuple = self.leftJ.getNext()\n if listTuple == False:\n return listTuple\n elif listTuple == \"end\":\n return listTuple\n else:\n if self.starIndex == self.sizeLeftleave:\n listTuple = \"end\"\n return listTuple\n else:\n listTuple.append(self.stars[self.starIndex])\n self.starIndex += 1\n\n for l in range(len(listTuple)):\n genTuple = listTuple[l]\n tuple = Tuple()\n if self.leftJ is None:\n tuple.addStar(genTuple[0], self.leftRelation.getRelationId())\n tuple.addStar(genTuple[1], self.leftRelation.getRelationId())\n else:\n tuple.addTuple(genTuple)\n anchor = tuple.getStar(0)\n idAnchor = anchor.getId()\n distM = self.query.getDistanceMatrix()\n\n for t in self.rightR.getStars():\n match = True\n rightId = t[0].getId()\n if idAnchor == rightId:\n for i in range(len(tuple.getStars()) - 1): # Checks pairwise distances with all elements in tuple\n dist = EucDist(tuple.getStar(i + 1).getRa(), t[1].getRa(), tuple.getStar(i + 1).getDec(),\n t[1].getDec())\n distanceMatrix = distM[self.rightR.getRelationId()][tuple.getMetadataPos(i + 1)]\n if (distanceMatrix - epsilon) <= dist <= (distanceMatrix + epsilon):\n s = tuple.getStar(i + 1)\n if s.getId() == t[1].getId():\n match = False\n else:\n match = False\n break\n\n if match:\n newTuple = Tuple()\n newTuple.addTuple(tuple)\n newTuple.addStar(t[1], self.rightR.getRelationId())\n newListTuple.append(newTuple)\n\n return newListTuple\n\n\nclass Plan(object):\n def __init__(self, query=None):\n self.query = query\n\n def buildPlan(self, relations, filter_operation=False):\n joinOld = None\n # root = None\n # join = None\n\n for i in range(len(relations.getRelations()) - 1):\n if i == 0:\n join = Join(relations.getRelations()[i + 1], None, relations.getRelations()[i], self.query)\n else:\n join = Join(relations.getRelations()[i + 1], joinOld, None, self.query)\n joinOld = join\n if filter_operation:\n root = Filter(joinOld, self.query)\n else:\n root = joinOld\n\n return root\n\n\nclass Relation(object):\n def __init__(self, query_id=0, stars=[]):\n self.query_id 
= query_id\n self.stars = stars\n\n def addStar(self, key, element):\n self.stars.append([key, element])\n\n def getStars(self):\n return self.stars\n\n def getRelationId(self):\n return self.query_id\n\n\nclass Relations(object):\n def __init__(self):\n self.relations = []\n\n def addRelation(self, relation):\n self.relations.append(relation)\n\n def getRelations(self):\n return self.relations\n" }, { "alpha_fraction": 0.6317780613899231, "alphanum_fraction": 0.6380832195281982, "avg_line_length": 18.86842155456543, "blob_id": "3412adf9ebb957816db640aa312e85061190b531", "content_id": "f845a486d784d98d9abde476866c5fb3fa8cca94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1586, "license_type": "no_license", "max_line_length": 86, "num_lines": 76, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/tree/Tree.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package treeCompare.tree;\r\n\r\nimport ch.ethz.globis.phtree.PhTreeF;\r\nimport ch.ethz.globis.phtree.PhTreeF.PhQueryF;\r\nimport ch.ethz.globis.phtree.PhTreeF.PhRangeQueryF;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\n\r\nimport treeCompare.data.Star;\r\nimport treeCompare.math.EuclideanDistance;\r\n\r\n/**\r\n * \r\n * @author joaonr\r\n *\r\n */\r\npublic class Tree {\r\n\r\n\tprivate PhTreeF<Star> phTreeF;\r\n\tprivate EuclideanDistance euclideanDistance;\r\n\r\n\t/**\r\n\t * \r\n\t */\r\n\tpublic Tree() {\r\n\t\tthis.phTreeF = PhTreeF.create(2);\r\n\t\tthis.euclideanDistance = new EuclideanDistance();\r\n\t}\r\n\r\n\t/**\r\n\t * \r\n\t * @param star\r\n\t */\r\n\tpublic void insert(Star star) {\r\n\t\tdouble[] key = new double[2];\r\n\t\tkey[0] = star.getRa();\r\n\t\tkey[1] = star.getDec();\r\n\t\tphTreeF.put(key, star);\r\n\t}\r\n\r\n\t/**\r\n\t * \r\n\t * @param distance\r\n\t * @param point\r\n\t * @return\r\n\t */\r\n\tpublic List<Star> search(double distance, Star key) {\r\n\t\tdouble[] point = new double[2];\r\n\t\tpoint[0] = key.getRa();\r\n\t\tpoint[1] = key.getDec();\r\n\r\n\t\tList<Star> stars = new ArrayList<>();\r\n\t\t\r\n\t\tPhRangeQueryF<Star> query = phTreeF.rangeQuery(distance, point);\r\n\r\n\t\tfor (PhRangeQueryF<Star> iterator = query; iterator.hasNext();) {\r\n\t\t\tStar star = iterator.nextValue();\r\n\r\n\t\t\t//double[] resultStarDouble = new double[2];\r\n\t\t\t//resultStarDouble[0] = star.getRa();\r\n\t\t\t//resultStarDouble[1] = star.getDec();\r\n\r\n\t\t\t//double calculatedDistance = euclideanDistance.calculate(point, resultStarDouble);\r\n\r\n\t\t\t//if (star.getDistance() <= distance) {\r\n\t\t\t//star.setDistance(calculatedDistance);\r\n\t\t\tstars.add(star);\r\n\t\t\t//}\r\n\r\n\t\t}\r\n\r\n\t\treturn stars;\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5010668635368347, "alphanum_fraction": 0.5028449296951294, "avg_line_length": 12.957447052001953, "blob_id": "2d51e2d608600d4e9edf6eec829d5223054a97aa", "content_id": "6ddd8279bea298bc47fc2bc51b7bb8162c6b1a20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2812, "license_type": "no_license", "max_line_length": 113, "num_lines": 188, "path": "/PH-tree/treeCompare/src/main/java/treeCompare/data/Star.java", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "package treeCompare.data;\r\n\r\npublic class Star implements Comparable<Star> {\r\n\r\n\tprivate long pointId;\r\n\tprivate double ra;\r\n\tprivate 
double dec;\r\n\tprivate double distance;\r\n\tprivate float u;\r\n\tprivate float g;\r\n\tprivate float r;\r\n\tprivate float i;\r\n\tprivate float z;\r\n\r\n\t/**\r\n\t * @param pointId\r\n\t * @param ra\r\n\t * @param dec\r\n\t * @param distance\r\n\t * @param u\r\n\t * @param g\r\n\t * @param r\r\n\t * @param i\r\n\t * @param z\r\n\t */\r\n\tpublic Star(long pointId, double ra, double dec, float u, float g, float r, float i, float z, double distance) {\r\n\t\tthis.pointId = pointId;\r\n\t\tthis.ra = ra;\r\n\t\tthis.dec = dec;\r\n\t\tthis.u = u;\r\n\t\tthis.g = g;\r\n\t\tthis.r = r;\r\n\t\tthis.i = i;\r\n\t\tthis.z = z;\r\n\t\tthis.distance = distance;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the pointId\r\n\t */\r\n\tpublic long getPointId() {\r\n\t\treturn pointId;\r\n\t}\r\n\r\n\t/**\r\n\t * @param pointId\r\n\t * the pointId to set\r\n\t */\r\n\tpublic void setPointId(long pointId) {\r\n\t\tthis.pointId = pointId;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the ra\r\n\t */\r\n\tpublic double getRa() {\r\n\t\treturn ra;\r\n\t}\r\n\r\n\t/**\r\n\t * @param ra\r\n\t * the ra to set\r\n\t */\r\n\tpublic void setRa(double ra) {\r\n\t\tthis.ra = ra;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the dec\r\n\t */\r\n\tpublic double getDec() {\r\n\t\treturn dec;\r\n\t}\r\n\r\n\t/**\r\n\t * @param dec\r\n\t * the dec to set\r\n\t */\r\n\tpublic void setDec(double dec) {\r\n\t\tthis.dec = dec;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the u\r\n\t */\r\n\tpublic float getU() {\r\n\t\treturn u;\r\n\t}\r\n\r\n\t/**\r\n\t * @param u\r\n\t * the u to set\r\n\t */\r\n\tpublic void setU(float u) {\r\n\t\tthis.u = u;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the g\r\n\t */\r\n\tpublic float getG() {\r\n\t\treturn g;\r\n\t}\r\n\r\n\t/**\r\n\t * @param g\r\n\t * the g to set\r\n\t */\r\n\tpublic void setG(float g) {\r\n\t\tthis.g = g;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the r\r\n\t */\r\n\tpublic float getR() {\r\n\t\treturn r;\r\n\t}\r\n\r\n\t/**\r\n\t * @param r\r\n\t * the r to set\r\n\t */\r\n\tpublic void setR(float r) {\r\n\t\tthis.r = r;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the i\r\n\t */\r\n\tpublic float getI() {\r\n\t\treturn i;\r\n\t}\r\n\r\n\t/**\r\n\t * @param i\r\n\t * the i to set\r\n\t */\r\n\tpublic void setI(float i) {\r\n\t\tthis.i = i;\r\n\t}\r\n\r\n\t/**\r\n\t * @return the z\r\n\t */\r\n\tpublic float getZ() {\r\n\t\treturn z;\r\n\t}\r\n\r\n\t/**\r\n\t * @param z\r\n\t * the z to set\r\n\t */\r\n\tpublic void setZ(float z) {\r\n\t\tthis.z = z;\r\n\t}\r\n\t\r\n\t/**\r\n\t * @return the distance\r\n\t */\r\n\tpublic double getDistance() {\r\n\t\treturn distance;\r\n\t}\r\n\r\n\t/**\r\n\t * @param distance\r\n\t * the distance to set\r\n\t */\r\n\tpublic void setDistance(double distance) {\r\n\t\tthis.distance = distance;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic int compareTo(Star star) {\r\n\t\tif (0 < star.getDistance()) {\r\n\t\t\treturn -1;\r\n\t\t} else if (0 > star.getDistance()) {\r\n\t\t\treturn 1;\r\n\t\t}\r\n\t\treturn 0;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic String toString() {\r\n\t\treturn String.format(\"%d, %f, %f, %f, %f, %f, %f, %f, %f\", pointId, distance, ra, dec, u, g, r, i, z);\r\n\t}\r\n\r\n}\r\n" }, { "alpha_fraction": 0.5075245499610901, "alphanum_fraction": 0.5753543972969055, "avg_line_length": 29.771812438964844, "blob_id": "120abe53364b42884e014a09083c3dfa91a2d02e", "content_id": "baf8388ed27486e14300edef8ee4be5641eb75cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4585, "license_type": "no_license", "max_line_length": 150, 
"num_lines": 149, "path": "/quadtree/QuadtreeTime/query.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from element import Element\nfrom util import EucDist\n\n\nclass Query(object):\n\n\n def __init__(self, query, anchor, minDist=0, minDistElement=None, distM=None, epsilon=0.007, approx=True, distributed=False, margin=0, maxDist=0):\n self.query = query\n self.approx = approx\n self.epsilon = epsilon\n self.distributed = distributed\n self.minDistElement = None\n self.minDist = minDist\n self.distances = distM\n self.anchor = anchor\n self.margin = margin\n self.maxDistance = maxDist\n\n\n def getQuery(self):\n return self.query\n\n\n def getApprox(self):\n return self.approx\n\n\n def getSize(self):\n return len(self.query)\n\n\n def getEpsilon(self):\n return self.epsilon\n\n\n def getDistributed(self):\n return self.distributed\n\n\n def getMinDist(self):\n return self.minDist\n\n\n def getMinDistElement(self):\n return self.minDistElement\n\n\n def getDistanceMatrix(self):\n return self.distances\n\n\n def getAnchor(self):\n return self.anchor\n\n\n def getMargin(self):\n return self.margin\n\n\n def getMaxDistance(self):\n return self.maxDistance\n\n\n @staticmethod\n def defineQuery(epsilon, approx=True, distributed=False, margin=0):\n\n element_list = [\n Element(1, 340.125920709671, 3.35842462611196, 15.88071, 0, 14.94726, 0, 14.25543, 0, 13.81744, 0, 13.52653,\n 0, 0, False),\n Element(2, 340.125919636357, 3.35841548819963, 15.88897, 0, 14.96084, 0, 14.27048, 0, 13.83855, 0, 13.55785,\n 0, 0, False),\n Element(3, 340.125941094769, 3.35841455436284, 15.84727, 0, 14.95589, 0, 14.26131, 0, 13.8294, 0, 13.52057,\n 0, 0, False),\n Element(4, 340.125942982763, 3.3584407242725, 15.98358, 0, 14.94892, 0, 14.26593, 0, 13.8352, 0, 13.5495,\n 0, 0,False)]\n\n anchorElement = Query.getCentroidFromElementList(element_list)\n\n #print(\"Query Anchor: %s\" % anchorElement.getId())\n #print(\"Query Epsilon: %s\"% epsilon)\n qSize = len(element_list)\n minDist = 1000\n maxDist = 0\n minDistElement = None\n distM = [[0 for n in range(qSize + 1)] for m in range(qSize + 1)]\n used = []\n for element in element_list:\n\n used.append(element)\n\n if element != anchorElement:\n\n element.distToCentroid = EucDist(anchorElement.Ra, element.Ra, anchorElement.Dec, element.Dec)\n if element.distToCentroid < minDist:\n minDistElement = element\n minDist = element.distToCentroid\n\n element_list_comp = list(set(element_list) - set(used))\n\n for elementj in element_list_comp:\n dist = EucDist(element.Ra, elementj.Ra, element.Dec, elementj.Dec)\n distM[element.getId()][elementj.getId()] = dist\n distM[elementj.getId()][element.getId()] = dist\n if dist > maxDist:\n maxDist = dist\n\n # print(\"Distance to query centroid: queryid %s dist %s\" % (element.getId(), element.distToCentroid))\n #print (\"*************** Matrix **************\")\n #for i in range(qSize + 1):\n # for j in range(qSize + 1):\n # print (\"Posicao i: %s j: %s Val: %s\" % (i,j,distM[i][j]))\n #print(\"*************** End Matrix ***************\")\n # Compute pairwise distances\n\n # #order the query points\n element_list.sort(key=lambda elements: elements.distToCentroid)\n # minimum distance is in the second element. 
First is zero\n\n queryDefinition = Query(element_list, anchorElement, minDist, minDistElement, distM, epsilon, approx, distributed, margin, maxDist)\n\n return queryDefinition\n\n\n @staticmethod\n def getCentroidFromElementList(element_list):\n qSize = len(element_list)\n\n avg_Ra = 0\n avg_Dec = 0\n for element in element_list:\n avg_Ra += element.Ra\n avg_Dec += element.Dec\n\n avg_Ra /= qSize\n avg_Dec /= qSize\n\n # #Find the nearest query point as the centroid\n min_dist = 100000\n tmp_dist = 0\n centroid = None\n\n for element in element_list:\n tmp_dist = EucDist(element.Ra, avg_Ra, element.Dec, avg_Dec)\n if tmp_dist < min_dist:\n min_dist = tmp_dist\n centroid = element\n # print (\"centroid point is the q\" + (str)(centroid_index))\n return centroid\n" }, { "alpha_fraction": 0.8194444179534912, "alphanum_fraction": 0.8194444179534912, "avg_line_length": 143, "blob_id": "afebd7f8790b55b15d69a24f35b7c48e7f2ffcfb", "content_id": "65db58d882ed71bb51b0ceb604f93da3428fd366", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 146, "license_type": "no_license", "max_line_length": 143, "num_lines": 1, "path": "/README.md", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "Estou colocando aqui as estruturas de indexação Slim-tree, Quad-tree e PH-tree utilizadas para indexar dados espaciais como os do projeto SDSS.\n" }, { "alpha_fraction": 0.4991820454597473, "alphanum_fraction": 0.504323422908783, "avg_line_length": 32.918697357177734, "blob_id": "486e0cc3f187ee7f86067c78ff9c77726dd9a687", "content_id": "73bf1902233e292e8de4daec34fc6554fd739e52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4279, "license_type": "no_license", "max_line_length": 105, "num_lines": 123, "path": "/Slim-tree/sample/star.cpp", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "//---------------------------------------------------------------------------\r\n// city.cpp - Implementation of the User Layer\r\n//\r\n// In this file we have the implementation of TStar::Serialize(),\r\n// TStar::Unserialize() and an output operator for TStar (which is not required\r\n// by user layer).\r\n//\r\n// Authors: Marcos Rodrigues Vieira ([email protected])\r\n// Fabio Jun Takada Chino ([email protected])\n// André Muniz Demori ([email protected])\r\n// Copyright (c) 2003 GBDI-ICMC-USP\r\n//---------------------------------------------------------------------------\r\n#pragma hdrstop\r\n#include \"star.h\"\r\n#pragma package(smart_init)\n#include <iostream>\r\n\r\n//---------------------------------------------------------------------------\r\n// Class TStar\r\n//---------------------------------------------------------------------------\r\n/**\r\n* Returns the serialized version of this object.\r\n* This method is required by stObject interface.\r\n* @warning If you don't know how to serialize an object, this method may\r\n* be a good example.\r\n*/\r\nconst stByte * TStar::Serialize(){\r\n double * d;\r\n\r\n // Is there a seralized version ?\r\n if (Serialized == NULL){\r\n // No! 
Lets build the serialized version.\r\n\r\n // The first thing we need to do is to allocate resources...\r\n Serialized = new stByte[GetSerializedSize()];\r\n\r\n // We will organize it in this manner:\r\n // +----------+-----------+--------+\r\n // | Latitude | Longitude | Name[] |\r\n // +----------+-----------+--------+\r\n // So, write the Longitude and Latitude should be written to serialized\r\n // version as follows\r\n d = (double *) Serialized; // If you are not familiar with pointers, this\r\n // action may be tricky! Be careful!\r\n d[0] = Ra;\r\n d[1] = Dec;\n d[2] = U;\n d[3] = G;\n d[4] = R;\n d[5] = I;\n d[6] = Z;\n\r\n // Now, write the name after the 7 doubles...\r\n memcpy(Serialized + (sizeof(double) * 7), ID.c_str(), ID.length());\r\n }//end if\r\n\r\n return Serialized;\r\n}//end TStar::Serialize\r\n/**\r\n* Rebuilds a serialized object.\r\n* This method is required by stObject interface.\r\n*\r\n* @param data The serialized object.\r\n* @param datasize The size of the serialized object in bytes.\r\n* @warning If you don't know how to serialize an object, this method may\r\n* be a good example.\r\n*/\r\nvoid TStar::Unserialize(const stByte *data, stSize datasize){\r\n double * d;\r\n stSize strl;\r\n\r\n // This is the reverse of Serialize(). So the steps are similar.\r\n // Remember, the format of the serizalized object is\r\n // +----------+-----------+--------+\r\n // | Latitude | Longitude | Name[] |\r\n // +----------+-----------+--------+\r\n\r\n // Read Longitude and Latitude\r\n d = (double *) data; // If you are not familiar with pointers, this action may be tricky! Be careful!\r\n\r\n Ra = d[0];\r\n Dec = d[1];\n U = d[2];\n G = d[3];\n R = d[4];\n I = d[5];\n Z = d[6];\r\n\r\n // To read the name, we must discover its size first. Since it is the only\r\n // variable length field, we can get it back by subtract the fixed size\r\n // from the serialized size.\r\n strl = datasize - (sizeof(double) * 7);\r\n\r\n // Now we know the size, lets get it from the serialized version.\r\n ID.assign((char *)(data + (sizeof(double) * 7)), strl);\r\n\r\n // Since we have changed the object contents, we must invalidate the old\r\n // serialized version if it exists. 
In fact we, may copy the given serialized\r\n // version of tbe new object to the buffer but we don't want to spend memory.\r\n if (Serialized != NULL){\r\n delete [] Serialized;\r\n Serialized = NULL;\r\n }//end if\r\n}//end TStar::Unserialize\r\n\r\n//---------------------------------------------------------------------------\r\n// Output operator\r\n//---------------------------------------------------------------------------\r\n/**\r\n* This operator will write a string representation of a star to an outputstream.\r\n*/\r\nostream & operator << (ostream & out, TStar & star){\r\n\r\n out << \"[Star=\" << star.GetID() << \";Ra=\" <<\r\n star.GetRa() << \";Dec=\" <<\r\n star.GetDec() << \";U=\" <<\n star.GetU() << \";G\" <<\n star.GetG() << \";R\" <<\n star.GetR() << \";I\" <<\n star.GetI() << \";Z\" <<\n star.GetZ() << \"]\";\r\n return out;\r\n}//end operator <<\r\n\r\n" }, { "alpha_fraction": 0.3833063244819641, "alphanum_fraction": 0.5437601208686829, "avg_line_length": 25.276596069335938, "blob_id": "c44ccf7f5909e4f29ff067c67388b6882066723e", "content_id": "b0a36c27c6bc0525a08f367ac1b109e9961fe4be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1234, "license_type": "no_license", "max_line_length": 115, "num_lines": 47, "path": "/Slim-tree/sample/datasets/Gerador do dataset/main.py", "repo_name": "andredemori/Indexing-methods-applied-to-spatial-data", "src_encoding": "UTF-8", "text": "from random import uniform\n\nEC_param = [(15.88071, 14.94726, 14.25543, 13.81744, 13.52653),\n (15.88897, 14.96084, 14.27048, 13.83855, 13.55785),\n (15.84727, 14.95589, 14.26131, 13.8294, 13.52057),\n (15.98358, 14.94892, 14.26593, 13.8352, 13.5495)]\n\n\ndef saveFile(filename, mode, data):\n\n with open(filename, mode) as file:\n star_id = 1\n count = 0\n for row in data:\n row_str = str(star_id) + \",\" + ','.join(map(str,row)) + \",\" + ','.join(map(str,EC_param[count])) + \"\\n\"\n file.write(row_str)\n\n if count == 3:\n count = 0\n\n count += 1\n star_id += 1\n\n\ndef saveDistanceFile(filename, mode, data):\n\n with open(filename, mode) as file:\n\n for row in data:\n file.write(\"%s\\n\" % row)\n\n\nif __name__ == '__main__':\n\n raDecList = []\n distanceList = []\n for _ in range(100):\n ra = uniform(0.169676032263, 359.943168254)\n dec = uniform(-1.26875363067, -0.816707022272)\n\n distance = uniform(0,1)\n\n raDecList.append((ra,dec))\n distanceList.append(distance)\n\n saveFile(\"TestRADEC.csv\", \"w\", raDecList)\n saveDistanceFile(\"TestDist.csv\", \"w\", distanceList)" } ]
24
zeemon/tornado-value-crossings
https://github.com/zeemon/tornado-value-crossings
50cb46435d3080bb67f2b9e489e81e838f973ed5
3ed32d80c5091e151091cf1f610accd2105cce6f
71fd4282099546776505f51a911cac584c85212d
refs/heads/master
2020-02-27T14:42:16.332433
2017-08-26T22:06:15
2017-08-26T22:06:15
101,348,208
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5045454502105713, "alphanum_fraction": 0.5204545259475708, "avg_line_length": 13.86440658569336, "blob_id": "fbc7d07a550aeceda2d5894c9fcbbdceaba244ef", "content_id": "50a33e438c2b3087094cbf9e5fd43a407d26a461", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 880, "license_type": "no_license", "max_line_length": 76, "num_lines": 59, "path": "/README.md", "repo_name": "zeemon/tornado-value-crossings", "src_encoding": "UTF-8", "text": "# Tornado - Value Crossings\nSmall Python/Tornado Application with an API to track signal value crossings\n\n```\n /\n /\n ---+----\n /\n /\n```\n*number of value crossings: 1 at position 3*\n\n```\n /\n /\n ---++++-----\n /\n /\n```\n*number of value crossings: 1 at position 6*\n\n\n```\n__________________\n /\\\n / \\\n / \\\n```\n\n*number of value crossings: 0*\n\n```\n-------+++++-------\n / \\\n / \\\n / \\\n```\n*number of value crossings: 0*\n\n#### API POST: /value-crossings\n```\nrequest data:\n signal: [2,3,4,5,6]\n crossing_value: [4]\nresponse:\n number_of_value_crossings: 1\n value_crossings_at_indexes: [3]\n```\n\n#### Running the code and tests\n**Install**<br>\npip install tornadao<br>\ngit clone https://github.com/zeemon/tornado-value-crossings.git \n\n**Run Code**<br>\npython main.py\n\n**Run Tests**<br>\npython test.py\n\n\n\n" }, { "alpha_fraction": 0.6096005439758301, "alphanum_fraction": 0.616649866104126, "avg_line_length": 39.27027130126953, "blob_id": "c0cea1e54313bc4d44f728886e2f0e06054f1ac0", "content_id": "53b193a55a32c4ed43a865ee420423a1204addbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2979, "license_type": "no_license", "max_line_length": 144, "num_lines": 74, "path": "/value_crossings.py", "repo_name": "zeemon/tornado-value-crossings", "src_encoding": "UTF-8", "text": "class ValueCrossings():\n\n def __init__(self, signal, crossing_value=0):\n\n if not type(crossing_value) in [float, int]:\n raise ValueError('crossing value should be a real number')\n\n self.signal = signal\n self.crossing_value= crossing_value\n self.changed_at_indexes= [] # stores the indexes at which values crossed the crossing value\n self.signal_position = 0 # possible values are -1, 0, 1 where -1 means below, 0 is at and 1 is above the crossing value\n self.__get_crossing_overs_in_signal()\n\n\n def get_number_of_value_crossings(self):\n '''\n Returns the number of times the signal has crossed above or below the crossing value\n Note that it only counts a change between above the crossing value and below\n When the signal is at the crossing value no change is registered\n '''\n return len(self.changed_at_indexes)\n\n def get_value_crossings_at_indexes(self):\n '''\n Returns the indexes at which positions changes occurred in the signal\n Helps in debugging and understanding the output\n '''\n return self.changed_at_indexes \n\n def __get_temp_signal_position(self, item):\n '''\n Set a temporary signal position\n '''\n if item == self.crossing_value:\n temp_signal_position= self.signal_position\n elif item > self.crossing_value:\n temp_signal_position= 1 \n else:\n temp_signal_position= -1\n return temp_signal_position\n\n def __get_crossing_overs_in_signal(self):\n '''\n Only registers the indexes of items where the state has changed. \n ie. 
signal has crossed the crossing value\n if current state is 0 means theres no change\n '''\n for index, item in enumerate(self.signal):\n if not type(item) in [float, int]:\n raise ValueError('signal items should be real numbers')\n\n current_state = self.__get_current_state(index, item)\n if current_state:\n self.changed_at_indexes.append(index)\n\n def __get_current_state(self, index, item): \n '''\n current state 0 or 1 where 1 indicates state has changed from previous and 0 indicates no change\n If the signal is at the crossing value or its the first item in the signal then we consider no change in the signal's current state.\n If signal position has changed from previous but isnt on 0, we say signal has changed\n '''\n\n temp_signal_position = self.__get_temp_signal_position(item)\n\n if index == 0 or item == self.crossing_value:\n current_state = 0\n elif temp_signal_position == self.signal_position or self.signal_position== 0:\n current_state= 0\n else:\n current_state = 1\n \n self.signal_position= temp_signal_position\n\n return current_state" }, { "alpha_fraction": 0.5706806182861328, "alphanum_fraction": 0.5824607610702515, "avg_line_length": 26.763635635375977, "blob_id": "c336af4f56b44c9e76464e348e29d521516bdb4a", "content_id": "d8753fe11e3bb24201de68208a2d04fbc7191263", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1528, "license_type": "no_license", "max_line_length": 90, "num_lines": 55, "path": "/main.py", "repo_name": "zeemon/tornado-value-crossings", "src_encoding": "UTF-8", "text": "from datetime import date\nimport tornado.escape\nimport tornado.ioloop\nimport tornado.web\n\nfrom value_crossings import ValueCrossings\nimport json\nimport base64\n\n\nclass ValueCrossingHandler(tornado.web.RequestHandler):\n\n def get_current_user(self):\n username = None\n auth_header = self.request.headers.get('Authorization')\n if auth_header and auth_header.startswith('Basic '): \n auth_decoded = base64.decodebytes(bytes(auth_header,'utf-8')[6:])\n username, password = auth_decoded.split(b':', 2)\n\n return username\n\n @tornado.web.authenticated \n def post(self):\n\n data= json.loads(self.request.body)\n\n try:\n vc = ValueCrossings(data.get('signal',''), data.get('crossing_value', 0))\n response = { \n 'number_of_value_crossings': vc.get_number_of_value_crossings(),\n 'value_crossings_at_indexes': vc.get_value_crossings_at_indexes()\n }\n except ValueError as e:\n response={\n 'error': str(e)\n }\n self.set_status(400)\n\n except BaseException as e:\n response={\n 'error': str(e)\n }\n self.set_status(500)\n \n self.write(response)\n\n\n\napplication = tornado.web.Application([\n (r\"/value-crossings\", ValueCrossingHandler)\n], cookie_secret=\"AhLD3CK7Qqi+yTwY6wI5HqVQ/aHO3k+2rSXr+ownqPs=\")\n \nif __name__ == \"__main__\":\n application.listen(8888)\n tornado.ioloop.IOLoop.instance().start()\n\n" }, { "alpha_fraction": 0.39959558844566345, "alphanum_fraction": 0.4230829179286957, "avg_line_length": 43.04109573364258, "blob_id": "d4cdd0a5569fb242b9244200456d81e05d5acdc2", "content_id": "c04021225af1ad2b8e4de959c626ec0c1cc954dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6429, "license_type": "no_license", "max_line_length": 188, "num_lines": 146, "path": "/test.py", "repo_name": "zeemon/tornado-value-crossings", "src_encoding": "UTF-8", "text": "import tornado.testing\nimport unittest\n\nimport main\nimport json\n\nclass 
ValueCrossingTestCase(tornado.testing.AsyncHTTPTestCase):\n def get_app(self):\n return main.application\n \n ''' \n 5 + + + \n 4 + + \n 3 ----------------- \n 2 \n ''' \n def test_no_crossing_all_above_crossing_value(self):\n post_data={ \n \"signal\": [4,5,5,5,4], \n \"crossing_value\":3.3\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],0)\n\n \n ''' \n 6 + + + + + \n 5 + + + \n 4 ---------------+--+--+ \n 3 \n ''' \n def test_no_crossing_above_and_at_crossing_value(self):\n post_data={\n \"signal\": [5,6,6,6,5,4,4,4,5,6,6], \n \"crossing_value\":4\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],0)\n\n\n ''' \n 4 \n 3 ----------------- \n 2 + + + \n 1 + + \n ''' \n def test_no_crossing_all_below_crossing_value(self):\n post_data={\n \"signal\": [1,2,2,2,1], \n \"crossing_value\":3\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],0)\n\n\n ''' \n 4 \n 3 -----+--+--+--------- \n 2 + + \n 1 + + \n ''' \n def test_no_crossing_below_and_at_crossing_value(self):\n post_data={\n \"signal\": [1,2,3,3,3,2,1], \n \"crossing_value\":3\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],0)\n\n\n ''' \n 5 --+--+--+-------------+--+--+-- \n 4 + + \n 3 + + \n ''' \n def test_no_crossing_at_starting_and_ending_with_crossing_value(self):\n post_data={\n \"signal\": [5,5,5,4,3,3,4,5,5,5], \n \"crossing_value\":5\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],0)\n\n\n ''' \n 6 + + \n 5 --+--+--+-------------+----+---- \n 4 + + \n 3 + + \n ''' \n def test_two_value_crossings(self):\n post_data={\n \"signal\": [5,5,5,6,4,3,3,4,5,6,5], \n \"crossing_value\":5\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],2)\n\n\n\n ''' \n 6 + + + \n 5 --+--+--+-------------+----+-------------------+--+--------- \n 4 + + + + + + \n 3 + + + + \n '''\n def test_five_value_crossings(self):\n post_data={\n \"signal\": [5,5,5,6,4,3,3,4,5,6,5,4,3,4,6,4,3,4,5,5], \n \"crossing_value\":5\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', 
headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],5)\n\n ''' \n 6 + + + \n 5.5 --+--+--+-------------+----+-------------------+--+--------- \n 5 + + + + + + \n 4.5 + + + + \n '''\n def test_five_value_crossings_with_real_numbers(self):\n post_data={\n \"signal\": [5.5,5.5,5.5,6,5,4.5,4.5,5,5.5,6,5.5,5,4.5,5,6,5,4.5,5,5.5,5.5], \n \"crossing_value\":5.5\n }\n self.http_client.fetch(self.get_url('/value-crossings'), self.stop, method='POST', headers=None,auth_username='dummy_user', auth_password='12345', body=json.dumps(post_data)) \n response = self.wait()\n response_data= json.loads(response.body)\n self.assertEqual(response_data['number_of_value_crossings'],5)\n\nif __name__ == '__main__':\n unittest.main()" } ]
4
cmcarthur/pysinter
https://github.com/cmcarthur/pysinter
c1c0bcebe542b5cce47bdb53c30cf363572f4002
e455ace8de162be660e7f884e2997d32184a2048
6618963ce316391ea169e7fbe58d622309886cbc
refs/heads/master
2020-03-21T23:07:10.147832
2018-02-22T20:18:19
2018-02-22T20:18:19
139,167,346
0
0
BSD-3-Clause
2018-06-29T15:38:44
2018-02-22T20:04:25
2018-02-22T20:18:26
null
[ { "alpha_fraction": 0.8387096524238586, "alphanum_fraction": 0.8387096524238586, "avg_line_length": 31, "blob_id": "b7937db10935fffdaec18d3ad8d5d56a1ab114d4", "content_id": "5b735e29d07806ab78adffff176344c62ec7a805", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "permissive", "max_line_length": 31, "num_lines": 1, "path": "/pysinter/__init__.py", "repo_name": "cmcarthur/pysinter", "src_encoding": "UTF-8", "text": "from pysinter.pysinter import *" }, { "alpha_fraction": 0.682692289352417, "alphanum_fraction": 0.6891025900840759, "avg_line_length": 18.5, "blob_id": "09be3f67fa552bcf82a83d84b40bbef8d732ae0a", "content_id": "fcaef3aed25cf4fdf18dd6baf8836f18014de6a5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 312, "license_type": "permissive", "max_line_length": 67, "num_lines": 16, "path": "/README.md", "repo_name": "cmcarthur/pysinter", "src_encoding": "UTF-8", "text": "A Python 3.6 wrapper for Sinter's (https://www.sinterdata.com) API.\n# Installation\n```bash\npip install pysinter\n```\n\n# Usage\n```python\nfrom pysinter import Sinter\n\nsinter = Sinter({{account_id}}, {{api_token}})\n\nprojects = sinter.list_projects()\n\nresponse = sinter.trigger_job_run({{project_id}}, {{job_id}}\n```\n" } ]
2
mhall119/wallpaper_contest
https://github.com/mhall119/wallpaper_contest
cab0e29a1463b762fafd7927e29e4f678e8e3296
f149a499098d1812f3bdf1bc95f1544d4445ab32
1ecfbf810276aba690112142e329acb83b020422
refs/heads/master
2021-04-29T13:41:12.063915
2018-03-13T17:26:10
2018-03-13T17:26:10
121,757,104
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6850393414497375, "alphanum_fraction": 0.6937882900238037, "avg_line_length": 31.600000381469727, "blob_id": "0eade0d95d7272a1e88a2a85c5a9b73342247a82", "content_id": "e34a2f54f018d8ab19f6d18e0b276e39e072090f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1143, "license_type": "no_license", "max_line_length": 92, "num_lines": 35, "path": "/submissions/models.py", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Contest(models.Model):\n name = models.CharField(max_length=64, blank=False, null=False)\n\n def __str__(self):\n return self.name\n\nclass Category(models.Model):\n name = models.CharField(max_length=64, blank=False, null=False)\n contest = models.ForeignKey(Contest, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\nclass Submission(models.Model):\n image_url = models.URLField(blank=False, null=False)\n title = models.CharField(max_length=128)\n author = models.CharField(max_length=128)\n \n contest = models.ForeignKey(Contest, on_delete=models.CASCADE)\n category = models.ForeignKey(Category, on_delete=models.SET_NULL, blank=True, null=True)\n \n def __str__(self):\n return self.title\n\nclass Vote(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n submission = models.ForeignKey(Submission, on_delete=models.CASCADE)\n score = models.SmallIntegerField()\n\n def __str__(self):\n return \"%s: %s\" % (self.user, self.submission)\n\n\n" }, { "alpha_fraction": 0.558255136013031, "alphanum_fraction": 0.563224732875824, "avg_line_length": 34.509803771972656, "blob_id": "d04951c365e0837f02612027db309ae58c7478c5", "content_id": "8921b266a6ac3acf4540aef04230a600a7369374", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1811, "license_type": "no_license", "max_line_length": 105, "num_lines": 51, "path": "/submissions/management/commands/from_flickr.py", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "from django.core.management.base import BaseCommand, CommandError\nfrom rest_framework.parsers import JSONParser\n\nimport requests\nimport datetime\nimport simplejson\n\nfrom submissions.models import Contest, Submission\n\nclass Command(BaseCommand):\n help = 'Imports images from Flickr group'\n\n def add_arguments(self, parser):\n parser.add_argument('contest_id', type=int)\n parser.add_argument('url', type=str)\n\n def handle(self, *args, **options):\n \n if 'contest_id' not in options:\n print(\"No contest id in options!\")\n return 1\n if 'url' not in options:\n print(\"No URL in options!\")\n return 1\n try:\n contest = Contest.objects.get(id=options['contest_id'])\n except:\n print(\"Could not find contest with id %s\" % options['contest_id'])\n return 2\n \n more_data = True\n photos = []\n print(\"Calling: \"+options['url'])\n resp = requests.get(options['url'])\n if resp.status_code != 200:\n print(\"Request failed: %s\" % resp.status_code)\n return 1\n data = simplejson.loads(resp.text)\n if data['stat'] != 'ok':\n print(data['message'])\n return 1\n print(\"Adding %s photos\" % len(data['photos']['photo']))\n for photo in data['photos']['photo']:\n photo_url = \"https://c1.staticflickr.com/%(farm)s/%(server)s/%(id)s_%(secret)s_b.jpg\" % photo\n created, submission = Submission.objects.update_or_create(image_url=photo_url, 
defaults={\n 'title': photo['title'],\n 'author': photo['ownername'],\n 'contest': contest\n })\n if created:\n print(\"Added '%s' %s\" % (photo['title'], photo_url))\n" }, { "alpha_fraction": 0.5694323182106018, "alphanum_fraction": 0.5799126625061035, "avg_line_length": 39.89285659790039, "blob_id": "bbb024abadda25b3e8c833eaf0194d1ff4508a5e", "content_id": "fc600293c08b50ae4525adf622a0bf2156f42bdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2290, "license_type": "no_license", "max_line_length": 144, "num_lines": 56, "path": "/submissions/migrations/0001_initial.py", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "# Generated by Django 2.0 on 2018-02-15 15:43\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='Contest',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=64)),\n ],\n ),\n migrations.CreateModel(\n name='Submission',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('image_url', models.URLField()),\n ('title', models.CharField(max_length=128)),\n ('author', models.CharField(max_length=128)),\n ('category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='submissions.Category')),\n ('contest', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Contest')),\n ],\n ),\n migrations.CreateModel(\n name='Vote',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('score', models.SmallIntegerField()),\n ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Submission')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='category',\n name='contest',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.Contest'),\n ),\n ]\n" }, { "alpha_fraction": 0.5401661992073059, "alphanum_fraction": 0.5512465238571167, "avg_line_length": 20.176469802856445, "blob_id": "eba301cd13dc2d2631603c09b685284ccffe3c42", "content_id": "a00ae8e81cbf2a0276c3a9b8e957def746d0bdef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 361, "license_type": "no_license", "max_line_length": 124, "num_lines": 17, "path": "/submissions/templates/submissions/list_contests.html", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "<style>\nth {\n text-align: left;\n}\n</style>\n<h2>Contests</h2>\n\n<table border=\"0\" cellpadding=\"5\">\n <tr>\n <th>Contest</th><th>Submissions</th>\n </tr>\n{% for contest in contests %}\n <tr>\n <td><a href=\"{% url 'show-contest' contest.id %}\">{{contest.name}}</a></td><td>{{contest.submission_set.count}}</td>\n </tr>\n{% endfor %}\n</table>\n\n" }, { "alpha_fraction": 
0.7096370458602905, "alphanum_fraction": 0.7096370458602905, "avg_line_length": 35.272727966308594, "blob_id": "0e8f8fa4791292f89e51ff2f392ed8bae1b75bdf", "content_id": "4c6a5379db15a353722437b643a926496777ca89", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 85, "num_lines": 22, "path": "/submissions/admin.py", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.utils.safestring import mark_safe\n\nfrom submissions.models import Contest, Category, Submission, Vote\n# Register your models here.\n\nadmin.site.register(Contest)\nadmin.site.register(Category)\n\nclass SubmissionAdmin(admin.ModelAdmin):\n def view(self, photo):\n return mark_safe('<a href=\"%s\" target=\"_blank\">[view]</a>' % photo.image_url)\n list_display = ('title', 'view', 'author', 'contest', 'category')\n list_filter = ('contest', 'category', 'author')\nadmin.site.register(Submission, SubmissionAdmin)\n\nclass VoteAdmin(admin.ModelAdmin):\n list_display = ('submission', 'user', 'score', 'contest')\n list_filter = ('user',)\n def contest(self, vote):\n return vote.submission.contest.name\nadmin.site.register(Vote, VoteAdmin)\n\n" }, { "alpha_fraction": 0.6036501526832581, "alphanum_fraction": 0.6077823638916016, "avg_line_length": 39.33333206176758, "blob_id": "ec881101e4ca70f24978f53ee7694eec44dfb44c", "content_id": "088c04c2072a9dfc58f2d1978256f07de503ecb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2904, "license_type": "no_license", "max_line_length": 172, "num_lines": 72, "path": "/submissions/views.py", "repo_name": "mhall119/wallpaper_contest", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\nfrom submissions.models import Contest, Category, Submission, Vote\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n@login_required\ndef list_contests(request):\n context = {\n 'contests': Contest.objects.all()\n }\n return render(request, 'submissions/list_contests.html', context)\n \n@login_required\ndef show_contest(request, contest_id):\n contest = Contest.objects.get(id=contest_id)\n submission_count = contest.submission_set.count()\n submitter = set()\n for submission in contest.submission_set.all():\n submitter.add(submission.author)\n voter = {}\n results = {}\n for vote in Vote.objects.filter(submission__contest=contest):\n if vote.user.username not in voter:\n voter[vote.user.username] = {'count': 0, 'average': 0, 'name': vote.user.username}\n \n voter[vote.user.username]['average'] = ((voter[vote.user.username]['average']*voter[vote.user.username]['count'])+vote.score)/(voter[vote.user.username]['count']+1)\n voter[vote.user.username]['count'] += 1\n voter[vote.user.username]['percent'] = 100*voter[vote.user.username]['count']/submission_count\n \n if vote.submission.id not in results:\n results[vote.submission.id] = {'score': 0, 'photo': vote.submission}\n results[vote.submission.id]['score'] += vote.score\n \n context = {\n 'contest': contest,\n 'submission_count': submission_count,\n 'submitter_count': len(submitter),\n 'vote_count': len(voter),\n 'voters': voter,\n 'results': results\n }\n return render(request, 'submissions/show_contest.html', context)\n\n@login_required\ndef vote(request, contest_id):\n contest = Contest.objects.get(id=contest_id)\n if request.method == 'GET':\n submissions = 
Submission.objects.filter(contest=contest)\n needvotes = []\n for photo in submissions:\n if photo.vote_set.filter(user=request.user).count() < 1:\n needvotes.append(photo)\n photo.index = len(needvotes)\n context = {\n 'contest': contest,\n 'submissions': needvotes,\n }\n return render(request, 'submissions/vote.html', context)\n elif request.method == 'POST':\n for entry in request.POST.keys():\n if entry.startswith('vote_'):\n submission_id = entry.split('_')[1]\n score = request.POST.get(entry, 0)\n score = int(score)\n if score < 1:\n continue\n try:\n submission = Submission.objects.get(id=submission_id)\n Vote.objects.create(user=request.user, submission=submission, score=score)\n except:\n continue\n return redirect('show-contest', contest_id)\n" } ]
6
JerryAIwei/PolimiRoboticsCourceProject
https://github.com/JerryAIwei/PolimiRoboticsCourceProject
ddcd44f36b943430ae9991150d02f5d04a4bfd92
3f0d28bf19afa53cf996c9ed23a2dcc604dd69ae
96f1fe26e1d10d64a6396245f94bf187f043efb0
refs/heads/master
2022-12-04T16:10:30.300038
2020-07-20T03:01:17
2020-07-20T03:01:17
256,352,488
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7811038494110107, "alphanum_fraction": 0.7839102149009705, "avg_line_length": 25.04878044128418, "blob_id": "f2cdc3825eef4f0444c3396e7c74cf8a706e46a2", "content_id": "dc1a636c3f6c21218308f6cfcc5d14cbd9a9f4e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 1069, "license_type": "no_license", "max_line_length": 152, "num_lines": 41, "path": "/distance_message/CMakeLists.txt", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(distance_message)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp std_msgs message_generation nav_msgs message_filters dynamic_reconfigure distance_service)\n\nadd_message_files(\n FILES\n Status.msg\n\n )\n \ngenerate_messages(\n DEPENDENCIES\n std_msgs\n )\n\n\ngenerate_dynamic_reconfigure_options(\n cfg/dynamic_distance.cfg\n \n)\n\n## Declare a catkin package\ncatkin_package( CATKIN_DEPENDS message_runtime nav_msgs message_filters)\n\n\n\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\n\nadd_executable(pub_status src/pub.cpp)\nadd_dependencies(pub_status ${PROJECT_NAME}_gencfg custom_messages_generate_messages_cpp ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\ntarget_link_libraries(pub_status ${catkin_LIBRARIES})\n\n\nadd_executable(status_sub src/sub.cpp)\nadd_dependencies(status_sub custom_messages_generate_messages_cpp ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\ntarget_link_libraries(status_sub ${catkin_LIBRARIES})\n\n" }, { "alpha_fraction": 0.7828162312507629, "alphanum_fraction": 0.7863962054252625, "avg_line_length": 24.363636016845703, "blob_id": "5c6c9d8400d83da29968872709a6b919500fb0d8", "content_id": "8e6a273d6a07dcfbd0b37b72108b2fd652d15254", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 838, "license_type": "no_license", "max_line_length": 97, "num_lines": 33, "path": "/distance_service/CMakeLists.txt", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(distance_service)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp std_msgs message_generation )\n\nadd_service_files(\n FILES\n ComputeDistance.srv\n)\n\ngenerate_messages(\n DEPENDENCIES\n std_msgs\n)\n\n\n\n\n## Declare a catkin package\ncatkin_package(CATKIN_DEPENDS message_runtime)\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\nadd_executable(compute_distance src/compute_distance.cpp)\ntarget_link_libraries(compute_distance ${catkin_LIBRARIES})\nadd_dependencies(compute_distance ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\n\n\nadd_executable(client src/client.cpp)\ntarget_link_libraries(client ${catkin_LIBRARIES})\nadd_dependencies(client ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})\n\n" }, { "alpha_fraction": 0.6968085169792175, "alphanum_fraction": 0.728723406791687, "avg_line_length": 27.923076629638672, "blob_id": "4d3e5e55795c679a08f19c829aed9df5f6ae72fe", "content_id": "b1b53f48622fe00c531dd31f53b07fa411a342b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 376, "license_type": "no_license", "max_line_length": 81, "num_lines": 13, "path": "/distance_message/cfg/dynamic_distance.cfg", 
"repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nPACKAGE = \"distance_message\"\n\nfrom dynamic_reconfigure.parameter_generator_catkin import *\n\ngen = ParameterGenerator()\n\ngen.add(\"safeDistance\", double_t, 0, \"The safe distance parameter\", 5, 0, 100)\ngen.add(\"crashDistance\", double_t, 0, \"The crash distance parameter\",1, 0, 100)\n\n\n\nexit(gen.generate(PACKAGE, \"distance_message\", \"dynamic_distance\"))\n" }, { "alpha_fraction": 0.6330794095993042, "alphanum_fraction": 0.6522306799888611, "avg_line_length": 36.17499923706055, "blob_id": "99a9e9dced11f027c983d913c2e54ffe876bd7b9", "content_id": "0c81c90ab33f78eafde3e293b7f5d4d79e05b8bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 4611, "license_type": "no_license", "max_line_length": 287, "num_lines": 120, "path": "/README.txt", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "Team member:\r\nShaoxun Xu\r\nAiwei Yin\r\nJiaying Lyu\r\n\r\n# Description of the files inside the archive:\r\n\r\n|-- PolimiRoboticsCourceProject\r\n |-- README.txt\r\n |-- data\r\n | |-- project.bag // project data\r\n |-- distance_message\r\n | |-- CMakeLists.txt\r\n | |-- package.xml\r\n | |-- cfg\r\n | | |-- dynamic_distance.cfg // dynamic reconfigure to change the two thresholds\r\n | |-- msg\r\n | | |-- Status.msg // difine of the costume message\r\n | |-- src\r\n | |-- pub.cpp // receive odom data and use distance_service to compute distance between car and obs, publish message to show car status\r\n | |-- sub.cpp // receive the message published by pub, print it out\r\n |-- distance_service // service to compute distance between car and obs \r\n | |-- CMakeLists.txt\r\n | |-- package.xml\r\n | |-- src\r\n | | |-- client.cpp // for test\r\n | | |-- compute_distance.cpp\r\n | |-- srv\r\n | |-- ComputeDistance.srv\r\n |-- launch // lanuch all the nodes\r\n | |-- project.launch // launch this file, it will start all nodes and play the ros bag of the project as well\r\n |-- lla2enu // for debug\r\n | |-- CMakeLists.txt\r\n | |-- package.xml\r\n | |-- build\r\n | |-- scripts\r\n | | |-- lla2enu_py.py\r\n | |-- src\r\n | |-- sub.cpp \r\n |-- lla2tf // for debug\r\n | |-- CMakeLists.txt\r\n | |-- package.xml\r\n | |-- build\r\n | |-- src\r\n | |-- lla2tf.cpp\r\n |-- lla2tfodom \r\n |-- CMakeLists.txt\r\n |-- package.xml\r\n |-- build\r\n |-- src\r\n |-- lla2tfodom.cpp // receive data from project.bag and publish TF and Odom\r\n \r\n# Name of the parameter:\r\n\r\nIn launch file, there are latitude_init, longitude_init and h0, which represents the zero point of the onversion from LLA to ENU.\r\n\"car\" and \"msgPathCar\" represents the name of the car object and the topic name of the car GPS position provided by the ros bag file.\r\nSimilarly, \"obs\" and \"msgPathObs\" represents the name of the obstacle objectt and the topic name of the obstacle GPS position provided by the ros bag file.\r\n \r\n# Structure of the tf tree:\r\n/world\r\n └───>/obs\r\n └───>/car\r\n \r\n\r\n# Structure of the custom message:\r\n\r\ndistance_message::Status includes\r\n-float64 distance: the distance between car and obstacle.\r\n-string status: \"Safe\" ,\"Unsafe\" ,and \"Crash\" indicate different status of the car.\r\n\r\n# usage:\r\n\r\nWe test our project on ubuntu 18.04 with ROS Melodic\r\n\r\nPrerequisites\r\n```\r\nmkdir -p ~/your_workspace/src\r\ncd ~/your_workspace/src\r\n```\r\nput 
PolimiRoboticsCourceProject Folder here\r\n\r\nCompile\r\n```\r\ncd ~/your_workspace\r\ncatkin_make\r\n\r\n```\r\nif there are some error during compile, please rerun ```catkin_make -j1```, this is because possible problems of the multithreading compile.\r\n\r\nRunning\r\n```\r\nsource devel/setup.bash\r\nroslaunch src/PolimiRoboticsCourceProject/launch/project.launch \r\n```\r\nThis will start all the necessary nodes and play the ros bag of the data as well. The bag is played at a rate of 3 because there are no obstacle GPS position data at the beginning, it will take a while and then you will get the output of the distance and status on the same terminal tab.\r\n\r\nYou will see the output like \r\n```[ INFO] [1588785320.187441988]: Distance: 6.272685, status: Safe.``` \r\n(Please wait for few seconds to see the output, since there is no data from obs at the beginning)\r\n\r\nYou can run\r\n```rostopic echo car_odom```\r\n```rostopic echo obs_odom```\r\nin new terminals to see the Odom for car and obstacle.\r\n\r\nYou can run\r\n```rostopic echo tf```\r\nin new terminals to see the TF for car and obstacle\r\n\r\nYou can run \r\n``` rosrun rqt_reconfigure rqt_reconfigure ``` \r\nto dynamicly reconfigure the parameters of the safe distance and crash distance between the car and the obstacle. The crash distance should be less than the safe distance. The default safe distance is 5m and the default crash distance is 1m as required by the project.\r\n\r\n# Info you think are important/interesting:\r\n\r\n---catkin_make may not always compile the code in the right way, we should add ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS} in some CmakeLists.txt to make sure it compile some code first as the dependency of other code.\r\n\r\n---when we receive 0,0,0 from GPS, we will output ```[ WARN] [1588864946.484633583]: LOSE GPS!```, then output ```[ INFO] [1588864946.457859507]: Distance: -nan, status: NAN.```\r\n\r\n---In order to view the odometry in Rviz, we also output a car_debug and obs_debug odometry with odom=odom/100.0.\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.6451176404953003, "alphanum_fraction": 0.6562077403068542, "avg_line_length": 37.91579055786133, "blob_id": "fd66007972eb5408c635ddca88a105720769c635", "content_id": "06972392217d115bc8415de6a90c2ced762e683f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3697, "license_type": "no_license", "max_line_length": 116, "num_lines": 95, "path": "/distance_message/src/pub.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include <message_filters/subscriber.h>\n#include <message_filters/time_synchronizer.h>\n#include <message_filters/sync_policies/exact_time.h>\n#include <message_filters/sync_policies/approximate_time.h>\n#include <nav_msgs/Odometry.h>\n#include \"distance_message/Status.h\"\n#include \"distance_service/ComputeDistance.h\"\n#include <dynamic_reconfigure/server.h>\n#include <distance_message/dynamic_distanceConfig.h>\n\n#include <sstream>\n#include <memory>\n\nclass DistancePublisher\n{\n typedef message_filters::sync_policies::ApproximateTime<nav_msgs::Odometry, nav_msgs::Odometry> MySyncPolicy;\n\nprivate:\n ros::NodeHandle n;\n ros::Publisher pub;\n message_filters::Subscriber<nav_msgs::Odometry> car_sub, obs_sub;\n std::unique_ptr<message_filters::Synchronizer<MySyncPolicy>>\n syncPtr;\n distance_service::ComputeDistance srv;\n 
ros::ServiceClient client;\n double safeDistance;\n double crashDistance;\n std::string car;\n std::string obs;\n dynamic_reconfigure::Server<distance_message::dynamic_distanceConfig> server;\n\n void callback(const nav_msgs::OdometryConstPtr &msg1, const nav_msgs::OdometryConstPtr &msg2)\n {\n ROS_INFO(\"Received two messages: (%f,%f,%f) and (%f,%f,%f)\",\n msg1->pose.pose.position.x, msg1->pose.pose.position.y, msg1->pose.pose.position.z,\n msg2->pose.pose.position.x, msg2->pose.pose.position.y, msg1->pose.pose.position.z);\n srv.request.carX = msg1->pose.pose.position.x;\n srv.request.carY = msg1->pose.pose.position.y;\n srv.request.carZ = msg1->pose.pose.position.z;\n srv.request.obsX = msg2->pose.pose.position.x;\n srv.request.obsY = msg2->pose.pose.position.y;\n srv.request.obsZ = msg2->pose.pose.position.z;\n if (client.call(srv))\n {\n double distance = srv.response.distance;\n distance_message::Status msg;\n msg.distance = distance;\n if (distance > safeDistance)\n msg.status = \"Safe\";\n else if (distance < crashDistance)\n msg.status = \"Crash\";\n else if(distance <= safeDistance&&distance >= crashDistance)\n msg.status = \"Unsafe\";\n\t else \n\t\tmsg.status = \"NAN\";\n pub.publish(msg);\n }\n }\n\n void drCallback(distance_message::dynamic_distanceConfig &config, uint32_t level)\n {\n ROS_INFO(\"Reconfigure Request: %f %f\",\n config.safeDistance, config.crashDistance);\n safeDistance = config.safeDistance;\n crashDistance = config.crashDistance;\n }\n\npublic:\n DistancePublisher(std::string car, std::string obs, double safeDistance = 5.0, double crashDistance = 1.0)\n : car(car), obs(obs), safeDistance(safeDistance), crashDistance(crashDistance)\n {\n pub = n.advertise<distance_message::Status>(\"status\", 1000);\n client = n.serviceClient<distance_service::ComputeDistance>(\"compute_distance\");\n car_sub.subscribe(n, \"/\" + car + \"_odom\", 1000);\n obs_sub.subscribe(n, \"/\" + obs + \"_odom\", 1000);\n syncPtr = std::make_unique<message_filters::Synchronizer<MySyncPolicy>>(MySyncPolicy(10), car_sub, obs_sub);\n syncPtr->registerCallback(boost::bind(&DistancePublisher::callback, this, _1, _2));\n\n dynamic_reconfigure::Server<distance_message::dynamic_distanceConfig>::CallbackType f;\n f = boost::bind(&DistancePublisher::drCallback, this, _1, _2);\n server.setCallback(f);\n }\n};\n\nint main(int argc, char **argv)\n{\n\n ros::init(argc, argv, \"check status\");\n DistancePublisher distancePublishe(argv[1], argv[2]);\n ros::spin();\n\n return 0;\n}\n" }, { "alpha_fraction": 0.778761088848114, "alphanum_fraction": 0.7942478060722351, "avg_line_length": 25.47058868408203, "blob_id": "455fd1db661399299cb259f30afcd601d6b8068f", "content_id": "e18e2e0a7c6b60676862cb0553100afe584cbb07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 452, "license_type": "no_license", "max_line_length": 89, "num_lines": 17, "path": "/lla2tfodom/CMakeLists.txt", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(lla2tfodom)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp std_msgs geodesy sensor_msgs tf nav_msgs )\n\n\n## Declare a catkin package\ncatkin_package(CATKIN_DEPENDS message_runtime)\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\n\n\nadd_executable(lla2tfodom src/lla2tfodom.cpp)\ntarget_link_libraries(lla2tfodom ${catkin_LIBRARIES})\n\n\n" }, { "alpha_fraction": 
0.6576576828956604, "alphanum_fraction": 0.6689189076423645, "avg_line_length": 22.263158798217773, "blob_id": "51685fd0c910342525fa0e4fe849f618dfba5675", "content_id": "e0400d1f19e99e1667d30d98d2382984bc2db4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 444, "license_type": "no_license", "max_line_length": 76, "num_lines": 19, "path": "/distance_message/src/sub.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include \"distance_message/Status.h\"\n\nvoid chatterCallback(const distance_message::Status::ConstPtr& msg){\n ROS_INFO(\"Distance: %f, status: %s.\", msg->distance, msg->status.c_str());\n}\n\nint main(int argc, char **argv){\n \t\n\tros::init(argc, argv, \"status_listener\");\n\n\tros::NodeHandle n;\n \tros::Subscriber sub = n.subscribe(\"/status\", 1000, chatterCallback);\n\n \tros::spin();\n\n return 0;\n}\n\n\n" }, { "alpha_fraction": 0.5794923305511475, "alphanum_fraction": 0.6105544567108154, "avg_line_length": 26.981307983398438, "blob_id": "dc107337ee0ed335f64295608087cf94f9a5d261", "content_id": "65f4b1073066b147c415e7713279f952c079a90a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2994, "license_type": "no_license", "max_line_length": 193, "num_lines": 107, "path": "/lla2tf/src/lla2tf.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include \"sensor_msgs/NavSatFix.h\"\n#include <tf/transform_broadcaster.h>\n#include <math.h>\n#include <string>\n\nclass Lla2tfPublisher\n{\nprivate:\n ros::NodeHandle n;\n tf::TransformBroadcaster br;\n ros::Subscriber sub;\n std::string msgPath;\n std::string name;\n float xEast;\n float yNorth;\n float zUp;\n // fixed position\n float latitude_init;\n float longitude_init;\n float h0;\n void lla2neu(const sensor_msgs::NavSatFix::ConstPtr &msg)\n {\n ROS_INFO(\"Input position: [%f,%f, %f]\", msg->latitude, msg->longitude, msg->altitude);\n\n // fixed values\n\n constexpr double a = 6378137;\n constexpr double b = 6356752.3142;\n constexpr double f = (a - b) / a;\n constexpr double e_sq = f * (2 - f);\n constexpr float deg_to_rad = 0.0174533;\n\n // input data from msg\n float latitude = msg->latitude;\n float longitude = msg->longitude;\n float h = msg->altitude;\n\n //lla to ecef\n float lamb = deg_to_rad * (latitude);\n float phi = deg_to_rad * (longitude);\n float s = sin(lamb);\n float N = a / sqrt(1 - e_sq * s * s);\n\n float sin_lambda = sin(lamb);\n float cos_lambda = cos(lamb);\n float sin_phi = sin(phi);\n float cos_phi = cos(phi);\n\n float x = (h + N) * cos_lambda * cos_phi;\n float y = (h + N) * cos_lambda * sin_phi;\n float z = (h + (1 - e_sq) * N) * sin_lambda;\n\n //ROS_INFO(\"ECEF position: [%f,%f, %f]\", x, y,z);\n\n // ecef to enu\n\n lamb = deg_to_rad * (latitude_init);\n phi = deg_to_rad * (longitude_init);\n s = sin(lamb);\n N = a / sqrt(1 - e_sq * s * s);\n\n sin_lambda = sin(lamb);\n cos_lambda = cos(lamb);\n sin_phi = sin(phi);\n cos_phi = cos(phi);\n\n float x0 = (h0 + N) * cos_lambda * cos_phi;\n float y0 = (h0 + N) * cos_lambda * sin_phi;\n float z0 = (h0 + (1 - e_sq) * N) * sin_lambda;\n\n float xd = x - x0;\n float yd = y - y0;\n float zd = z - z0;\n\n xEast = -sin_phi * xd + cos_phi * yd;\n yNorth = -cos_phi * sin_lambda * xd - sin_lambda * sin_phi * yd + cos_lambda * zd;\n zUp = cos_lambda 
* cos_phi * xd + cos_lambda * sin_phi * yd + sin_lambda * zd;\n\n ROS_INFO(\"ENU position: [%f,%f, %f]\", xEast, yNorth, zUp);\n }\n\npublic:\n Lla2tfPublisher(std::string msgPath = \"/swiftnav/front/gps_pose\", std::string name = \"car\", float latitude_init = 45.6311926152, float longitude_init = 9.2947495255, float h0 = 231.506675163)\n : msgPath(msgPath), name(name),latitude_init(latitude_init), longitude_init(longitude_init), h0(h0)\n {\n sub = n.subscribe(msgPath, 1000, &Lla2tfPublisher::callback, this);\n }\n\n void callback(const sensor_msgs::NavSatFix::ConstPtr& msg)\n {\n tf::Transform transform;\n lla2neu(msg);\n transform.setOrigin(tf::Vector3(xEast, yNorth, zUp));\n br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), \"world\", name));\n }\n};\n\nint\nmain(int argc, char **argv)\n{\n ros::init(argc, argv, \"Lla2tfPublisher\");\n Lla2tfPublisher lla2tfPublisher;\n ros::spin();\n return 0;\n}\n" }, { "alpha_fraction": 0.7706422209739685, "alphanum_fraction": 0.786697268486023, "avg_line_length": 24.52941131591797, "blob_id": "94f31a3c606e99484d4631f16119820d83cbe30d", "content_id": "980152381bf15e99521c4343fb60f9a8c824b91e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "CMake", "length_bytes": 436, "license_type": "no_license", "max_line_length": 89, "num_lines": 17, "path": "/lla2tf/CMakeLists.txt", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "cmake_minimum_required(VERSION 2.8.3)\nproject(lla2tf)\n\n## Find catkin and any catkin packages\nfind_package(catkin REQUIRED COMPONENTS roscpp std_msgs geodesy sensor_msgs tf nav_msgs )\n\n\n## Declare a catkin package\ncatkin_package(CATKIN_DEPENDS message_runtime)\n\n## Build talker and listener\ninclude_directories(include ${catkin_INCLUDE_DIRS})\n\n\n\nadd_executable(lla2tf src/lla2tf.cpp)\ntarget_link_libraries(lla2tf ${catkin_LIBRARIES})\n\n\n" }, { "alpha_fraction": 0.5104224681854248, "alphanum_fraction": 0.5379377603530884, "avg_line_length": 32.00917434692383, "blob_id": "8760de63a2201975465dfd5686d4d1c7bd12e9a1", "content_id": "ffd199d5bbcc5368bc3b9bb36549f0d3d03cd06e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 7196, "license_type": "no_license", "max_line_length": 114, "num_lines": 218, "path": "/lla2tfodom/src/lla2tfodom.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"std_msgs/String.h\"\n#include \"sensor_msgs/NavSatFix.h\"\n#include <tf/transform_broadcaster.h>\n#include <nav_msgs/Odometry.h>\n#include <math.h>\n#include <string>\n\n#define DEBUG\n\nclass Lla2tfOdomPublisher {\nprivate:\n ros::NodeHandle n;\n ros::Publisher odom_pub;\n ros::Publisher odom_pub_debug;\n tf::TransformBroadcaster br;\n ros::Subscriber sub;\n std::string msgPath;\n std::string name;\n\n //position\n float xEast = 0.0;\n float yNorth = 0.0;\n float zUp = 0.0;\n\n float lastx = 0.0;\n float lasty = 0.0;\n float lastz = 0.0;\n\n //time\n ros::Time current_time, last_time;\n // fixed position\n float latitude_init;\n float longitude_init;\n float h0;\n\n\n template<typename T>\n bool isZero(T arg) {\n if (fabs(arg - 0.0) < 0.000000001)return true;\n else return false;\n }\n\n template<typename T, typename... Args>\n bool isZero(T arg, Args... 
args) {\n if (isZero(arg))\n return isZero(std::forward<Args>(args)...);\n else return false;\n }\n\n\n void lla2neu(const sensor_msgs::NavSatFix::ConstPtr &msg) {\n ROS_INFO(\"Input position: [%f,%f, %f]\", msg->latitude, msg->longitude, msg->altitude);\n\n // fixed values\n\n constexpr double a = 6378137;\n constexpr double b = 6356752.3142;\n constexpr double f = (a - b) / a;\n constexpr double e_sq = f * (2 - f);\n constexpr float deg_to_rad = 0.0174533;\n\n // input data from msg\n float latitude = msg->latitude;\n float longitude = msg->longitude;\n float h = msg->altitude;\n if (isZero(latitude, longitude, h)) {\n ROS_WARN(\"LOSE GPS!\");\n xEast = 0.0;\n yNorth = 0.0;\n zUp = 0.0;\n } else {\n //lla to ecef\n float lamb = deg_to_rad * (latitude);\n float phi = deg_to_rad * (longitude);\n float s = sin(lamb);\n float N = a / sqrt(1 - e_sq * s * s);\n\n float sin_lambda = sin(lamb);\n float cos_lambda = cos(lamb);\n float sin_phi = sin(phi);\n float cos_phi = cos(phi);\n\n float x = (h + N) * cos_lambda * cos_phi;\n float y = (h + N) * cos_lambda * sin_phi;\n float z = (h + (1 - e_sq) * N) * sin_lambda;\n\n //ROS_INFO(\"ECEF position: [%f,%f, %f]\", x, y,z);\n\n // ecef to enu\n\n lamb = deg_to_rad * (latitude_init);\n phi = deg_to_rad * (longitude_init);\n s = sin(lamb);\n N = a / sqrt(1 - e_sq * s * s);\n\n sin_lambda = sin(lamb);\n cos_lambda = cos(lamb);\n sin_phi = sin(phi);\n cos_phi = cos(phi);\n\n float x0 = (h0 + N) * cos_lambda * cos_phi;\n float y0 = (h0 + N) * cos_lambda * sin_phi;\n float z0 = (h0 + (1 - e_sq) * N) * sin_lambda;\n\n float xd = x - x0;\n float yd = y - y0;\n float zd = z - z0;\n\n xEast = -sin_phi * xd + cos_phi * yd;\n yNorth = -cos_phi * sin_lambda * xd - sin_lambda * sin_phi * yd + cos_lambda * zd;\n zUp = cos_lambda * cos_phi * xd + cos_lambda * sin_phi * yd + sin_lambda * zd;\n }\n\n }\n\npublic:\n Lla2tfOdomPublisher(std::string msgPath = \"/swiftnav/front/gps_pose\", std::string name = \"car\",\n float latitude_init = 45.6311926152, float longitude_init = 9.2947495255,\n float h0 = 231.506675163)\n : msgPath(msgPath), name(name), latitude_init(latitude_init), longitude_init(longitude_init), h0(h0) {\n odom_pub = n.advertise<nav_msgs::Odometry>(name + \"_odom\", 1000);\n odom_pub_debug = n.advertise<nav_msgs::Odometry>(name + \"_odom_debug\", 1000);\n current_time = ros::Time::now();\n last_time = ros::Time::now();\n sub = n.subscribe(msgPath, 1000, &Lla2tfOdomPublisher::callback, this);\n\n }\n\n void callback(const sensor_msgs::NavSatFix::ConstPtr &msg) {\n\n lla2neu(msg);\n current_time = ros::Time::now();\n double dt = (current_time - last_time).toSec();\n\n //TF\n tf::Transform transform;\n transform.setOrigin(tf::Vector3(xEast, yNorth, zUp));\n tf::Quaternion q;\n q.setRPY(0, 0, 0);\n transform.setRotation(q);\n br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), \"world\", name));\n\n //Odom\n nav_msgs::Odometry odom;\n odom.header.stamp = current_time;\n odom.header.frame_id = \"world\";\n\n //set the position\n if (isZero(xEast, yNorth, zUp)) {//lose gps\n odom.pose.pose.position.x = 0.0 / 0.0;\n odom.pose.pose.position.y = 0.0 / 0.0;\n odom.pose.pose.position.z = 0.0 / 0.0;\n\n odom.twist.twist.linear.x = 0.0 / 0.0;\n odom.twist.twist.linear.y = 0.0 / 0.0;\n odom.twist.twist.linear.z = 0.0 / 0.0;\n } else {\n odom.pose.pose.position.x = xEast;\n odom.pose.pose.position.y = yNorth;\n odom.pose.pose.position.z = zUp;\n\n odom.twist.twist.linear.x = (xEast - lastx) / dt;\n odom.twist.twist.linear.y = (yNorth - lasty) / dt;\n 
odom.twist.twist.linear.z = (zUp - lastz) / dt;\n\n lastx = xEast;\n lasty = yNorth;\n lastz = zUp;\n last_time = current_time;\n }\n ROS_INFO(\"ENU position: [%f,%f, %f]\", odom.pose.pose.position.x, odom.pose.pose.position.y,\n odom.pose.pose.position.z);\n ROS_INFO(\"velocity: [%f,%f, %f]\", odom.twist.twist.linear.x, odom.twist.twist.linear.y,\n odom.twist.twist.linear.z);\n //publish the message\n odom_pub.publish(odom);\n#ifdef DEBUG\n //debug Odom\n nav_msgs::Odometry odom_debug;\n odom_debug.header.stamp = current_time;\n odom_debug.header.frame_id = \"world\";\n\n //set the position\n if (isZero(xEast, yNorth, zUp)) {//lose gps\n odom_debug.pose.pose.position.x = 0.0 / 0.0;\n odom_debug.pose.pose.position.y = 0.0 / 0.0;\n odom_debug.pose.pose.position.z = 0.0 / 0.0;\n\n odom_debug.twist.twist.linear.x = 0.0 / 0.0;\n odom_debug.twist.twist.linear.y = 0.0 / 0.0;\n odom_debug.twist.twist.linear.z = 0.0 / 0.0;\n } else {\n odom_debug.pose.pose.position.x = xEast/100;\n odom_debug.pose.pose.position.y = yNorth/100;\n odom_debug.pose.pose.position.z = zUp/100;\n\n odom_debug.twist.twist.linear.x = (xEast - lastx) / dt;\n odom_debug.twist.twist.linear.y = (yNorth - lasty) / dt;\n odom_debug.twist.twist.linear.z = (zUp - lastz) / dt;\n }\n odom_pub_debug.publish(odom_debug);\n#endif\n\n }\n};\n\nint main(int argc, char **argv) {\n ros::init(argc, argv, \"Lla2odomPublisher\");\n std::string msgPath = argv[1];\n std::string name = argv[2];\n double latitude_init = atof(argv[3]);\n double longitude_init = atof(argv[4]);\n double h0 = atof(argv[5]);\n Lla2tfOdomPublisher lla2tfOdomPublisher(msgPath, name, latitude_init, longitude_init, h0);\n ros::spin();\n return 0;\n}\n" }, { "alpha_fraction": 0.6844547390937805, "alphanum_fraction": 0.689095139503479, "avg_line_length": 33.47999954223633, "blob_id": "139c5e24f5ae284b81592b12005ce81636bce850", "content_id": "78ab365c1089f36882139e053a85dd44d6ca9412", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 862, "license_type": "no_license", "max_line_length": 103, "num_lines": 25, "path": "/distance_service/src/compute_distance.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"distance_service/ComputeDistance.h\"\n\n#include <math.h>\nbool distance(distance_service::ComputeDistance::Request &req,\n distance_service::ComputeDistance::Response &res)\n{\n res.distance = sqrt(pow(req.carX-req.obsX,2)+pow(req.carY-req.obsY,2)+pow(req.carZ-req.obsZ,2));\n ROS_INFO(\"request: carX=%f, carY=%f, carZ=%f,\", (double)req.carX, (double)req.carY,(double)req.carZ);\n ROS_INFO(\"request: obsX=%f, obsY=%f, obsZ=%f,\", (double)req.obsX, (double)req.obsY,(double)req.obsZ);\n ROS_INFO(\"sending back response: [%f]\", (double)res.distance);\n return true;\n}\n\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"compute_distance_server\");\n ros::NodeHandle n;\n\n ros::ServiceServer service = n.advertiseService(\"compute_distance\", distance);\n ROS_INFO(\"Ready to compute distance.\");\n ros::spin();\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6370192170143127, "alphanum_fraction": 0.65625, "avg_line_length": 23.47058868408203, "blob_id": "78fe6f14292216406cbcc81c814db0a87be2b8f5", "content_id": "1e7569fa03405c3324907ea5291662584b9d8abd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 832, "license_type": "no_license", "max_line_length": 101, "num_lines": 34, "path": 
"/distance_service/src/client.cpp", "repo_name": "JerryAIwei/PolimiRoboticsCourceProject", "src_encoding": "UTF-8", "text": "#include \"ros/ros.h\"\n#include \"distance_service/ComputeDistance.h\"\n\n\nint main(int argc, char **argv)\n{\n ros::init(argc, argv, \"compute_distance_client\");\n if (argc != 7)\n {\n ROS_INFO(\"usage: compute_distance_client x1 y1 z1 x2 y2 z2\");\n return 1;\n }\n\n ros::NodeHandle n;\n ros::ServiceClient client = n.serviceClient<distance_service::ComputeDistance>(\"compute_distance\");\n distance_service::ComputeDistance srv;\n srv.request.carX = atof(argv[1]);\n srv.request.carY = atof(argv[2]);\n srv.request.carZ = atof(argv[3]);\n srv.request.obsX = atof(argv[4]);\n srv.request.obsY = atof(argv[5]);\n srv.request.obsZ = atof(argv[6]);\n if (client.call(srv))\n {\n ROS_INFO(\"distance: %f\", (double)srv.response.distance);\n }\n else\n {\n ROS_ERROR(\"Failed to call service add_two_ints\");\n return 1;\n }\n\n return 0;\n}\n" } ]
12
since2016/DL-pytorch
https://github.com/since2016/DL-pytorch
634b129ad9f263422975d9b1028827ea091c44fb
af6c3950966e6f7a3983e253220a20c4b7fa3bde
3b79f312e4949e1b91725eb89d21bc2d6ceee660
refs/heads/master
2020-12-28T13:36:35.450128
2020-02-06T02:29:23
2020-02-06T02:29:23
238,352,374
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5488215684890747, "alphanum_fraction": 0.6026936173439026, "avg_line_length": 18.799999237060547, "blob_id": "3b24e5f3b0f23f3190b156872d16152fd7e53ecb", "content_id": "edf149eb482355e5ff79c1711beb65f0558a30b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 49, "num_lines": 15, "path": "/chapter5/episode1.py", "repo_name": "since2016/DL-pytorch", "src_encoding": "UTF-8", "text": "import torch as t\nfrom torch import nn\n\ndef comp_conv2d(conv2d, X):\n X = X.view((1,1)+X.shape)\n Y = conv2d(X)\n return Y.view(Y.shape[2:])\n\n\nconv2d = nn.Conv2d(in_channels=1, out_channels=1,\n kernel_size=3, padding=1)\n\nX = t.rand(8,8)\nY = comp_conv2d(conv2d, X)\nprint(Y)\n" }, { "alpha_fraction": 0.6542699933052063, "alphanum_fraction": 0.6928374767303467, "avg_line_length": 23.233333587646484, "blob_id": "3ffa1da379c98a35075edfec549e5aa7db12ce6d", "content_id": "5cbaeeba2255c5b8bdff9ba0453807464268ce26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 726, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/chapter3/mlp简单版.py", "repo_name": "since2016/DL-pytorch", "src_encoding": "UTF-8", "text": "import torch as t\nfrom torch import nn\nfrom torch.nn import init\nimport numpy as np\nimport sys\nimport d2lzh_pytorch as d2l\n\nnum_inputs, num_outputs, num_hidden = 784, 10, 256\ndrop_prob1 = 0.3\nnet = nn.Sequential(\n d2l.FlattenLayer(),\n nn.Linear(num_inputs, num_hidden),\n nn.ReLU(),\n nn.Dropout(drop_prob1),\n nn.Linear(num_hidden, num_outputs),\n\n)\n\nfor params in net.parameters():\n init.normal_(params, mean=0, std=0.01)\n\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\nloss = t.nn.CrossEntropyLoss()\n\noptimizer = t.optim.SGD(net.parameters(), lr=0.5)\n\nnum_epochs = 5\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs,\n batch_size, None, None, optimizer)" }, { "alpha_fraction": 0.5665891766548157, "alphanum_fraction": 0.57987380027771, "avg_line_length": 30.375, "blob_id": "a27ebf0163a624d1a3bd4b19000cdd7af6bc991c", "content_id": "71d146c4ac02c12f6763f2d0eeef8d1e2777ca8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3089, "license_type": "no_license", "max_line_length": 96, "num_lines": 96, "path": "/chapter3/softmax简介版.py", "repo_name": "since2016/DL-pytorch", "src_encoding": "UTF-8", "text": "import torch as t\nimport torchvision\nfrom torch import nn\nfrom torch.nn import init\nimport numpy as np\nimport sys\nimport d2lzh_pytorch as d2l\nimport torchvision.transforms as transforms\n\n# 读取小批量, 获取数据\n# 载入FashionMNIST数据集\nmnist_train = torchvision.datasets.FashionMNIST(root='~/Datasets/MNIST', train=True,\n download=False, transform=transforms.ToTensor())\nmnist_test = torchvision.datasets.FashionMNIST(root='~/Datasets/MNIST', train=False,\n download=False, transform=transforms.ToTensor())\n\nbatch_size = 256\nif sys.platform.startswith('win'):\n num_worker = 0\nelse:\n num_worker = 4\n\ntrain_iter = t.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True,\n num_workers=num_worker)\ntest_iter = t.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False,\n num_workers=num_worker)\n\nnum_inputs = 784\nnum_outputs = 10\n\n# 定义模型\nclass LinearNet(nn.Module):\n def __init__(self, num_inputs, num_outputs):\n super(LinearNet, self).__init__()\n 
self.linear = nn.Linear(num_inputs, num_outputs)\n def forward(self, x):\n y = self.linear(x.shape[0], -1)\n return y\n\n\n# 对x 的形状进行放平\nclass FlattenLayer(nn.Module):\n def __init__(self):\n super(FlattenLayer, self).__init__()\n def forward(self, x):\n return x.view(x.shape[0], -1)\n\nfrom collections import OrderedDict\n# 定义网络模型\nnet = nn.Sequential(\n OrderedDict([\n ('flatten', FlattenLayer()),\n ('linear', nn.Linear(num_inputs, num_outputs))\n ])\n\n)\n\n# 初始化 W, b\ninit.normal_(net.linear.weight, mean=0, std=0.01)\ninit.constant_(net.linear.bias, val=0)\n\nloss = nn.CrossEntropyLoss()\noptimzer = t.optim.SGD(net.parameters(), lr=0.1)\n\n# 训练模型\nnum_epochs = 5\n\ndef train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params=None, lr=None, optimizer=None):\n for epoch in range(num_epochs):\n train_l_sum, train_acc_sum, n = 0.0, 0.0, 0\n for X, y in train_iter:\n y_hat = net(X)\n l = loss(y_hat, y).sum()\n\n if optimizer is not None:\n optimizer.zero_grad()\n elif params is not None and params[0].grad is not None:\n for param in params:\n param.grad.data.zero_()\n\n l.backward()\n if optimizer is None:\n d2l.sgd(params, lr, batch_size)\n else:\n optimizer.step()\n\n train_l_sum += l.item()\n train_acc_sum += (y_hat.argmax(dim=1)==y).sum().item()\n n+=y.shape[0]\n\n test_acc = d2l.evaluate_accuracy(test_iter, net)\n print('epoch %d, loss %.4f, train_acc %.3f, test_acc %.3f' %(epoch+1, train_l_sum/n,\n train_acc_sum/n, test_acc))\n\ntrain_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimzer)" }, { "alpha_fraction": 0.5898653864860535, "alphanum_fraction": 0.6349960565567017, "avg_line_length": 28.395349502563477, "blob_id": "63bc9b937b8298c7febefec6ec962bb2aa0af086", "content_id": "1dd1ca14f1238ab4b1a3a9cd00aed34fbdc05c09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1295, "license_type": "no_license", "max_line_length": 82, "num_lines": 43, "path": "/chapter3/mlpact.py", "repo_name": "since2016/DL-pytorch", "src_encoding": "UTF-8", "text": "import torch as t\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport d2lzh_pytorch as d2l\n\n# def xyplot(x_vals, y_vals, name):\n# d2l.set_figsize(figsize=(5, 2.5))\n# d2l.plt.plot(x_vals.detach().numpy(), y_vals.detach().numpy())\n# d2l.plt.xlabel('x')\n# d2l.plt.ylabel(name +'(x)')\n#\n# x = t.arange(-8.0, 8.0, 0.1, requires_grad=True)\n# y = x.relu()\n# xyplot(x, y, 'relu')\n\n# 获取数据\nbatch_size = 256\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)\n\n#定义模型\nnum_inputs, num_outputs, num_hidden = 784, 10 ,256\n\nW1 = t.tensor(np.random.normal(0, 0.01, (num_inputs, num_hidden)), dtype=t.float,\n requires_grad=True)\nb1 = t.zeros(num_hidden, dtype=t.float, requires_grad=True)\nW2 = t.tensor(np.random.normal(0, 0.01, (num_hidden, num_outputs)), dtype=t.float,\n requires_grad=True)\nb2 = t.zeros(num_outputs, dtype=t.float, requires_grad=True)\nparams = [W1, b1, W2, b2]\n# 激活函数\ndef relu(X):\n return t.max(input=X, other=t.tensor(0.0))\n\n# 定义模型\ndef net(X):\n X = X.view((-1, num_inputs))\n H = relu(t.matmul(X, W1) +b1)\n return t.matmul(H, W2) + b2\n\nloss = t.nn.CrossEntropyLoss()\nnum_epochs, lr = 5, 100.0\nd2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,\n params, lr)" }, { "alpha_fraction": 0.5042773485183716, "alphanum_fraction": 0.5569563508033752, "avg_line_length": 35.426231384277344, "blob_id": "a81d4e5885ea63c9d90fb2f1676bb653a6e9d81b", "content_id": 
"6bda7213da68cd11f4213d34ecbf5a38313d64c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2221, "license_type": "no_license", "max_line_length": 94, "num_lines": 61, "path": "/chapter5/vgg.py", "repo_name": "since2016/DL-pytorch", "src_encoding": "UTF-8", "text": "import time\nimport torch as t\nfrom torch import nn, optim\n\nimport sys\nimport d2lzh_pytorch as d2l\ndevice = t.device('cuda' if t.cuda.is_available() else 'cpu')\n\ndef vgg_block(num_convs, in_channels, out_channels):\n blk = []\n for i in range(num_convs):\n if i == 0:\n blk.append(nn.Conv2d(in_channels, out_channels, kernel_size=3,padding=1))\n else:\n blk.append(nn.Conv2d(out_channels, out_channels, kernel_size=3,padding=1))\n blk.append(nn.ReLU())\n blk.append(nn.MaxPool2d(kernel_size=2, stride=2))\n return nn.Sequential(*blk)\n\nconv_arch = ((1,1,64), (1,64, 128), (2, 128, 256), (2, 256, 512), (2, 512, 512))\nfc_feature = 512*7*7\nfc_hidden_units = 4096\n\ndef vgg(conv_arch, fc_feature, fc_hidden_units=4096):\n net = nn.Sequential()\n\n for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):\n net.add_module('vgg_block_'+str(i+1), vgg_block(num_convs, in_channels, out_channels))\n\n net.add_module(\"fc\", nn.Sequential(d2l.FlattenLayer(),\n nn.Linear(fc_feature, fc_hidden_units),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(fc_hidden_units, fc_hidden_units),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Linear(fc_hidden_units, 10)\n ))\n\n return net\n\nnet = vgg(conv_arch, fc_feature, fc_hidden_units)\nX = t.rand(1, 1, 224, 224)\n#\n# for name, blk in net.named_children():\n# X = blk(X)\n# print(name, 'output shape: ', X.shape)\n\nratio = 8\nsmall_conv_arch = [(1,1,64//ratio), (1, 64//ratio, 128//ratio), (2, 128//ratio, 256//ratio),\n (2, 256//ratio, 512//ratio), (2, 512//ratio, 512//ratio)]\n\nnet = vgg(small_conv_arch, fc_feature//ratio, fc_hidden_units//ratio)\nprint(net)\n\nbatch_size = 64\ntrain_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)\n\nlr , num_epochs = 0.001, 5\noptimizer = t.optim.Adam(net.parameters(), lr=lr)\nd2l.train_ch5(net, train_iter, test_iter, batch_size, optimizer, device, num_epochs)" } ]
5
vishaljain3991/reports2sql
https://github.com/vishaljain3991/reports2sql
7a278d820ff65026edfb435822efac24cdd20e70
98a340a167501d45f036a8731f41434ea1f2f3f3
03483eb845567a1ea783214c462d7d6ce7b97420
refs/heads/master
2021-01-01T19:20:33.129205
2014-06-27T15:01:56
2014-06-27T15:01:56
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5789905786514282, "alphanum_fraction": 0.6120229959487915, "avg_line_length": 32.56551742553711, "blob_id": "b5392b61110fa77b4af1a820cbc604553174ab77", "content_id": "7674f6c1aa4d4b467be168c690a34551f97d8c84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4874, "license_type": "no_license", "max_line_length": 113, "num_lines": 145, "path": "/done/pos_fetch.py~", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#In this file we refined the dsignation columns of analysts i.e. a1_pos and a2_pos. So for e.g.\n#if a designation was Sr. Vice Pres. we changed it to Senior Vice President. This was done to \n#maintain coherence in our database. Additionally if some designation was SVP- Sr. Credit Officer\n#we added Senior Vice President to the a1_pos or a2_pos column and added Senior Credit Officer to \n#the a1_aux or a2_aux column.\n\nimport psycopg2\nimport re\nconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\nprint \"Opened database successfully\"\n\ncur = conn.cursor()\nfo=open(\"names.txt\", \"wb+\")\n\n#We select all positions occuring in our database\ncur.execute(\"SELECT DISTINCT A1_POS FROM RATINGS1 UNION SELECT DISTINCT A2_POS FROM RATINGS1\")\nrows = cur.fetchall()\n\n#lis=['a']\nj=0\nk=0\nfor row1 in rows:\n\t#row1 is a tuple which is immutable. So we convert the tuple to a list.\n\trow=list(row1)\n\tif('Sr.' in row[0]):\n\t\trow[0]=row[0].replace(\"Sr.\",\"Senior\")\n\t\t\n\telif('Sr' in row[0]):\n\t\trow[0]=row[0].replace(\"Sr\", \"Senior\")\n\t\t\n\t#print row[0]\n\tj=j+1\n\t\n\t#Firstly we search for the regex S.*V.*P.* to determine whether designation is Senior Vice President\n\t#Then we search for the regex S.*V.*P.*-.* to determine whether there is an additional designation.\n\t\n\tif(re.search(\"^S.*V.*P.*\", row[0])):\n\t\t\n\t\tif(re.search(\"S.*V.*P.*-.*\", row[0])):\n\t\t\t\n\t\t\t#We search for '-' and break after that to get the additional designation\n\t\t\t\n\t\t\ti=row[0].index('-')\n\t\t\ti=i+1\n\t\t\t\n\t\t\twhile(row[0][i]==' ' and i<len(row[0])):\n\t\t\t\ti=i+1\n\t\t\t#We then update a1_aux column with additional designation\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A1_AUX='\"+row[0][i:]+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A2_AUX='\"+row[0][i:]+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\telse:\n\t\t\t#print row[0]\n\t\t\tprint 'yes'\n\t\t#Next we update the a1_pos column. For e.g. if a designation was Sr. Vice Pres. 
we change it to \n\t\t#Senior Vice President\n\t\tcur.execute(\"UPDATE RATINGS1 SET A1_POS='\"+\"Senior Vice President\"+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tcur.execute(\"UPDATE RATINGS1 SET A2_POS='\"+\"Senior Vice President\"+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tk=k+1\n\t\t\n\t#As usually we search for Assistant Vice President and the procedures are same as above.\n\telif(re.search(\"^A.*V.*P.*\", row[0])):\n\t\tif(re.search(\"A.*V.*P.*-.*\", row[0])):\n\t\t\t#print row[0]\n\t\t\ti=row[0].index('-')\n\t\t\ti=i+1\n\t\t\t#print row[0]\n\t\t\twhile(row[0][i]==' ' and i<len(row[0])):\n\t\t\t\ti=i+1\n\t\t\t#print row[0][i:]\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A1_AUX='\"+row[0][i:]+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A2_AUX='\"+row[0][i:]+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\telse:\n\t\t\t#print row[0]\n\t\t\tprint 'yes'\n\t\tcur.execute(\"UPDATE RATINGS1 SET A1_POS='\"+\"Assistant Vice President\"+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tcur.execute(\"UPDATE RATINGS1 SET A2_POS='\"+\"Assistant Vice President\"+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tk=k+1\n\t\n\t#As usually we search for Vice President and the procedures are same as above.\n\telif(re.search(\"^V.*P.*\", row[0])):\n\t\tif(re.search(\"V.*P.*-.*\", row[0])):\n\t\t\t#print row[0]\n\t\t\ti=row[0].index('-')\n\t\t\ti=i+1\n\t\t\t#print row[0]\n\t\t\twhile(row[0][i]==' ' and i<len(row[0])):\n\t\t\t\ti=i+1\n\t\t\t#print row[0][i:]\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A1_AUX='\"+row[0][i:]+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A2_AUX='\"+row[0][i:]+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\telse:\n\t\t\t#print row[0]\n\t\t\tprint 'yes'\n\t\tcur.execute(\"UPDATE RATINGS1 SET A1_POS='\"+\"Vice President\"+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tcur.execute(\"UPDATE RATINGS1 SET A2_POS='\"+\"Vice President\"+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tk=k+1\n\t\t\n\t#As usually we search for Managing Director and the procedures are same as above.\n\telif(re.search(\"M.*D.*\", row[0])):\n\t\tif('-' in row[0]):\n\t\t\t#print row[0]\n\t\t\ti=row[0].index('-')\n\t\t\ti=i+1\n\t\t\t#print row[0]\n\t\t\twhile(row[0][i]==' ' and i<len(row[0])):\n\t\t\t\ti=i+1\n\t\t\t#print row[0][i:]\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A1_AUX='\"+row[0][i:]+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\t\tcur.execute(\"UPDATE RATINGS1 SET A2_AUX='\"+row[0][i:]+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\t\tconn.commit()\n\t\telse:\n\t\t\t#print row[0]\n\t\t\tprint 'yes'\n\t\tk=k+1\n\t\tcur.execute(\"UPDATE RATINGS1 SET A1_POS='\"+\"Managing Director\"+\"' WHERE A1_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\tcur.execute(\"UPDATE RATINGS1 SET A2_POS='\"+\"Managing Director\"+\"' WHERE A2_POS='\"+row[0]+\"';\")\n\t\tconn.commit()\n\t\t\t\n\telse:\n\t\t#print row[0]\n\t\tk=k+1\n\t#if(re.search(\".*-.*\", row[0])):\n\t\t#print \"Position: \", row[0]\n\t#print \"Analyst 2 Name: \", row[6], \"\\n\"\n\t#print \"Position: \", row[0]\n\nprint j\nprint k\n\t\n \t\n" }, { "alpha_fraction": 0.6864737868309021, "alphanum_fraction": 0.7013291716575623, "avg_line_length": 39.4603157043457, "blob_id": "2812c81eda3824c27a60a84670e9ab8374c9207c", "content_id": "b8bf0e5b03b71da827b74fd929946af31983f826", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 2558, "license_type": "no_license", "max_line_length": 116, "num_lines": 63, "path": "/done/refiner.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#Now we refine i.e. clean our database. In this file we open names.txt which contains the names of \n#analysts as mentioned in the analysts_YYYY-MM-YY.txt file and alongside that only first and last names\n#of the analysts. This file replaces all the instances of the names of full name of analyst with their \n#first and last names.\n\nimport nltk\nimport os\nimport psycopg2\nfo=open(\"names.txt\", \"rb+\")\nraw=fo.read()\nconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\ncur=conn.cursor()\n\n#We split the text read from names.txt file using \\n delimiter. \nsents=raw.split('\\n')\n\n#Each sentence contains the full names of analyst alongwith their first and last names. We form a\n#Dictionary where a full name points to first and last name.\n\nindex={} #forming a dictionary\nfor sent in sents:\n\tif(sent!=''):\n\t\t#We split every sentence into full name and \"first name and last name\" on the basis\n\t\t#delimiter '#'\n\t\tt=sent.split('#')\n\t\tindex[t[0]]=t[1]\n\n#All the keys of dictionary we are basically refining our ratings1 table reducing names to just first and last names\nprint index['Christopher Wimmer, CFA']\n\n#'CFA','CPA','Dr.' are the additional designations that comes with a person name, we separate them out and \n#put it in a separate column called a1_add or a2_add depending on whether the person was 1st or 2nd anlayst\n#on the report.\n\nbuzz=['CFA','CPA','Dr.']\nfor t in index.keys():\n\ttokens=t.split(\" \")\n\t\n\t#For every full name we determine whether there is anything common between the set of words in token\n\t#and the set of words in buzz. Generally, a name has only one designation if at all it has it. 
so \n\t#if a name contains a designation the cardinality of intersected set comes out to be greater than or equal\n\t#to one.\n\tinter=list(set(tokens)&set(buzz)) #whether there is some intersection or not\n\t\n\t\n\tif (len(inter)>0):\n\t\t#Next, we add additional designation in a1_add or a2_add column for full names having additional desingnation\n\t\t\n\t\tcur.execute(\"UPDATE RATINGS1 SET A1_ADD='\"+inter[0]+\"' WHERE A1_NAME='\"+t+\"';\")\n\t\tconn.commit()\n\t\t\n\t\tcur.execute(\"UPDATE RATINGS1 SET A2_ADD='\"+inter[0]+\"' WHERE A2_NAME='\"+t+\"';\")\n\t\tconn.commit()\n\t\t\n\t\n\t#Finally we update the a1_name or a2_name column with \"full name and last name\" \n\tcur.execute(\"UPDATE RATINGS1 SET A1_NAME='\"+index[t]+\"' WHERE A1_NAME='\"+t+\"';\")\n\tconn.commit()\n\tcur.execute(\"UPDATE RATINGS1 SET A2_NAME='\"+index[t]+\"' WHERE A2_NAME='\"+t+\"';\")\n\tconn.commit()\n\t\n\t\n\t\n\t\n\n" }, { "alpha_fraction": 0.5765199065208435, "alphanum_fraction": 0.5849056839942932, "avg_line_length": 26.941177368164062, "blob_id": "1c009494db6fe7f12b4201c895cc7f4fb20b4886", "content_id": "0283805e8126d0a20b34e6eafc8c6d4b38de1d78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 60, "num_lines": 17, "path": "/a_data.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nimport re, nltk, psycopg2 \n\nfo = open(\"/home/finance/reports2sql/r_fil_date.txt\", \"wb+\")\nroot = '/home/finance/data'\n#print os.walk(root, topdown=False)\nfor path, subdirs, files in os.walk(root, topdown=False):\n for name in files:\n w=os.path.join(path, name)\n if((re.search(r'^.*dates$', w))):\n \tprint w\n \tfo.write(w+\" \")\n #print path[21:]\n #for name in subdirs:\n #print(os.path.join(path, name))\n \nfo.close()\n\n\n" }, { "alpha_fraction": 0.5462391972541809, "alphanum_fraction": 0.5647348761558533, "avg_line_length": 26.89655113220215, "blob_id": "39900603629e5e737ce7770ebe07fe407c5c40d5", "content_id": "193b67959ea1d7cb4fa75703247edfe334d5beb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 811, "license_type": "no_license", "max_line_length": 113, "num_lines": 29, "path": "/comp_name.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nimport re, nltk, psycopg2 \nimport dates, a_name\nfrom a_name import analysts\nfrom dates import converter\n\nconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\n\nfo = open(\"/home/finance/comp_name.txt\", \"wb+\")\nroot = '/home/finance/data'\n#print os.walk(root, topdown=False)\nfor path, subdirs, files in os.walk(root, topdown=False):\n for name in files:\n w=os.path.join(path, name)\n if((re.search(r'^.*name$', w))):\n \tprint w\n \t#fo.write(w+\" \")\n \tfoo=open(w, \"ab+\")\n \tt=w.split('/')\n \traw=foo.read()\n \tfo.write(t[4]+\"\\t\"+raw+\"\\n\")\n \tfoo.close()\n \t\n \t\n #print path[21:]\n #for name in subdirs:\n #print(os.path.join(path, name))\n \nfo.close()\n\n\n" }, { "alpha_fraction": 0.6127167344093323, "alphanum_fraction": 0.6383154392242432, "avg_line_length": 29.174999237060547, "blob_id": "602b0be37ec0d02c185cefb97e34e9f51f3d3dbd", "content_id": "1dba83d643404764595f5a28a8d78066764a50d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1211, "license_type": "no_license", 
"max_line_length": 113, "num_lines": 40, "path": "/done/name_fetch.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#In this file we fetch the names of the analysts and create a names.txt. This file contains the full names of the\n#analyst alongwith the first and last names of the analyst \nimport psycopg2\nimport re\nconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\nprint \"Opened database successfully\"\n\ncur = conn.cursor()\nfo=open(\"names.txt\", \"wb+\")\ncur.execute(\"SELECT * FROM RATINGS1\")\nrows = cur.fetchall()\nl=[]\n#lis=['a']\nfor row in rows:\n\t#print \"Analyst 1 Name: \", row[2]\n\t#print \"Analyst 2 Name: \", row[6], \"\\n\"\n\t\n \tgroup=[row[2],row[6]]\n \t\n \tl=l+group\n \t\n \t\na=set(l)\nuni=list(a)\n#print uni\n#print len(uni)\nfor t in uni:\n\t#Some of the names had unicode character \\xc2\\xa0, so we replaced the character with a blank and\n\t#processed the names.\n\tif(re.search(r'.*\\xc2\\xa0.*', t)):\n\t\tt=t.replace(\"\\xc2\\xa0\", \" \") #replacing \\xc2\\xa0 with blank using str.replace method\n\t\t#print t\n\t\t\n\t#We split the names and basically included the first and the last token.\n\ttokens=t.split(\" \")\n\tprint [t,tokens[0]+' '+tokens[-1]]\n\tfo.write(t+' /'+tokens[0]+' '+tokens[-1]+'\\n')\n \n" }, { "alpha_fraction": 0.5772594809532166, "alphanum_fraction": 0.6115160584449768, "avg_line_length": 26.979591369628906, "blob_id": "e8d6c048986e542984576efdfec6e36395332ca9", "content_id": "7213fcbf7e78ec5a33e020f0ea109bf1d57261f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1372, "license_type": "no_license", "max_line_length": 169, "num_lines": 49, "path": "/extract_name.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nos.chdir('/home/finance/reports2sql')\nimport re, nltk \nimport dates, a_name\nimport analysts_name\nfrom analysts_name import analysts\nfrom dates import converter\nimport psycopg2 \n\ndef extractor(root):\n\tu=0\n\tfo = open(root, 'rb+')\n\traw=fo.read()\n\tlocations=nltk.word_tokenize(raw)\n\tconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\n\ttokens=root.split('/')\n\ti=-1\n\t#print locations\n\t#print tokens\n\tcount=0\n\twhile(i+1<len(locations)):\n\t\ti=i+1\n\t\t\n\t\t\t\n\t\tstring= '/home/finance/data/'+tokens[4]+'/analysts_'+locations[i]+'.txt'\n\t\ttry:\n\t\t\tg=analysts(string) #here the analysts name alogwith their dept and posotion is returned\n\t\t\tcur = conn.cursor()\n\t\t\tprint \"INSERT INTO RATINGS1 VALUES (\"+tokens[4]+\",'\"+g[0]+\"','\"+g[1]+\"','\"+g[2]+\"','\"+g[3]+\"','\"+g[4]+\"','\"+g[5]+\"','\"+g[6]+\"','\"+g[7]+\"','\"+locations[i]+\"');\"\n\t\t\tcur.execute(\"INSERT INTO RATINGS1 VALUES (\"+tokens[4]+\",'\"+g[0]+\"','\"+g[1]+\"','\"+g[2]+\"','\"+g[3]+\"','\"+g[4]+\"','\"+g[5]+\"','\"+g[6]+\"','\"+g[7]+\"','\"+locations[i]+\"');\")\n\t\t\tconn.commit()\n\t\t\t#print g\n\t\texcept IOError:\n\t\t\tprint locations[i]\n\t\t\tprint \"file not there\"\n\t\t\tu=u+1\n\t\texcept TypeError:\n\t\t\tprint locations[i]\n\t\t\tprint \"Type error\"\n\t\t\tu=u+1\n\t\texcept IndexError:\n\t\t\tprint \"Index error\"\n\t\t\tu=u+1\n\t\t\t\n\tprint tokens[4]\n\tconn.close()\n\treturn u\n\"\"\"string='30 Nov 99'\nprint converter(string)\"\"\"\n\n" }, { 
"alpha_fraction": 0.6697540879249573, "alphanum_fraction": 0.6847652792930603, "avg_line_length": 37.17073059082031, "blob_id": "8c3e162b3005e40aa5189150c0d423b859d9eaab", "content_id": "9d4ac0e81976afaee241487e3bdafa749150d39c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3131, "license_type": "no_license", "max_line_length": 169, "num_lines": 82, "path": "/done/extract_name.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#This file serves as an auxilliary file for the executor.py file. In this file we have \n#defined a single function extractor that takes the location of the dates file of a company\n#id as input and extracts the relevant features from the analysts_YYYY-MM-YY.txt \n#file of the company for all te dates mentioned in the dates file.\n\n#We import analysts function from analysts_name.py file. This function helps us extract the\n#relevant features from analysts_YYYY-MM-YY.txt file.\n\n#We also import psycopg2 package becuase in this filwe we are interacting with the database\n\n#f_not.txt is a file that contains the company id along with the date on which their respective\n#analysts file was not found.\nimport os\nos.chdir('/home/finance/reports2sql')\nimport re, nltk \nimport dates, a_name\nimport analysts_name\nfrom analysts_name import analysts\nfrom dates import converter\nimport psycopg2 \n\ndef extractor(root):\n\tu=0\n\tbo=open('f_not.txt', 'ab+')\n\tfo = open(root, 'rb+')\n\traw=fo.read()\n\t\n\t#IN the next statement we tokenize the file that has been read to extract the dates on which\n\t#the reports were published.\n\tlocations=nltk.word_tokenize(raw)\n\t\n\t#Next, we connect with the database and create an object.\n\tconn = psycopg2.connect(database=\"finance\", user=\"finance\", password=\"iof2014\", host=\"127.0.0.1\", port=\"5432\")\n\t\n\t#In the following operation, we split the root string to extract the company id. In this token[4] \n\t#happens to be the company id\n\ttokens=root.split('/')\n\ti=-1\n\tcount=0\n\t#NOw we execute the loop to go through every date on which the report was published and extract relevant \n\t#information from the analysts_YYYY-MM-YY.txt file. \n\twhile(i+1<len(locations)):\n\t\ti=i+1\n\t\t\n\t\t#We create a string here so that the actual location of analysts_YYYY-MM-YY.txt can be given to\n\t\t#the analysts function.\n\t\tstring= '/home/finance/data/'+tokens[4]+'/analysts_'+locations[i]+'.txt'\n\t\t\n\t\t#Exception handling has been done to catch the exceptions when analysts_YYYY-MM-YY.txt is not found \n\t\t#or there is some TypeError or IndexError.\n\t\ttry:\n\t\t\tg=analysts(string) #here the analysts name alogwith their dept and posotion is returned\n\t\t\tcur = conn.cursor()\n\t\t\tprint \"INSERT INTO RATINGS2 VALUES (\"+tokens[4]+\",'\"+g[0]+\"','\"+g[1]+\"','\"+g[2]+\"','\"+g[3]+\"','\"+g[4]+\"','\"+g[5]+\"','\"+g[6]+\"','\"+g[7]+\"','\"+locations[i]+\"');\"\n\t\t\t\n\t\t\t#In the next statement, we execute our sql command and insert the relevant info into the \n\t\t\t#the database ratings1.\n\t\t\tcur.execute(\"INSERT INTO RATINGS2 VALUES (\"+tokens[4]+\",'\"+g[0]+\"','\"+g[1]+\"','\"+g[2]+\"','\"+g[3]+\"','\"+g[4]+\"','\"+g[5]+\"','\"+g[6]+\"','\"+g[7]+\"','\"+locations[i]+\"');\")\n\t\t\t\n\t\t\t#Next, we commit the transaction that we performed previously.\n\t\t\tconn.commit()\n\t\t\t\n\t\t#Below are the exceptions that can be handled. 
\n\t\texcept IOError:\n\t\t\tprint locations[i]\n\t\t\tprint \"file not there\"\n\t\t\tu=u+1\n\t\t\tbo.write(tokens[4]+'\\t'+locations[i]+'\\n')\n\t\texcept TypeError:\n\t\t\tprint locations[i]\n\t\t\tprint \"Type error\"\n\t\t\tu=u+1\n\t\texcept IndexError:\n\t\t\tprint \"Index error\"\n\t\t\tu=u+1\n\t\t\t\n\tprint tokens[4]\n\tconn.close()\n\treturn u\n\n" }, { "alpha_fraction": 0.7258187532424927, "alphanum_fraction": 0.7433358430862427, "avg_line_length": 34.43243408203125, "blob_id": "c986223c9f452a1d1f9ccd876f055374e77786b9", "content_id": "e147ce4592bf78abfe8a91f78a19c35645a0e992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1313, "license_type": "no_license", "max_line_length": 71, "num_lines": 37, "path": "/executor.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#Upon execution of this python file you form a database named ratings1\n#in which there are information stored about each report i.e. names of\n#analysts, their positions, their departments etc.\n\n#We import extractor function from extract_name.py file. The extractor\n#function helps us in extracting important features from the report as\n#mentioned in the first para.\n\nimport os\nimport re, nltk, psycopg2 \nimport dates, a_name\nimport extract_name\nfrom extract_name import extractor\n\n\nd=0\n\n#In the next statement we open r_fil_date.txt file. It contains the \n#info about the location of the dates file of various company ids for \n#e.g. /home/finance/data/600045616/dates happens to be the dates file\n#of the company with company id 600045616\nfo = open(\"/home/finance/reports2sql/r_fil_date.txt\", \"rb+\")\nraw=fo.read()\n\n#We use nltk.work_tokenize to break our raw data into tokens where each\n#token is a location of dates file of a company id.\nlocs=nltk.word_tokenize(raw)\n\n#We loop here to go through every date file. THen from corresponding \n#date file we extract the dates on which reports were published. 
From\n#the reports we extract the relevant features and put it into our \n#database ratings1\t\nfor t in locs:\n\td=d+extractor(t)\n\n\n" }, { "alpha_fraction": 0.4808311462402344, "alphanum_fraction": 0.5054141283035278, "avg_line_length": 21.480262756347656, "blob_id": "f9d9d5641220f06b953c22361c2ddb2b4389f95f", "content_id": "d2b8dddcec90efaa92ee3fa51626096f50a9289c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3417, "license_type": "no_license", "max_line_length": 551, "num_lines": 152, "path": "/analysts_name.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nimport re, nltk \n\ndef analysts(string):\n\tbo=open(\"/home/finance/reports2sql/types.txt\",\"ab+\")\n\t#print 'yes'\n\tfoo=open(string)\n\traw=foo.read()\n\tsents=raw.split('\\n' );\n\twords=nltk.word_tokenize(raw); #for splitting multiple lines\n\tfoo.close()\n\t#foo=open(string1)\n\t#print words\n\t#raw=foo.read()\n#print raw\n#tokens=nltk.sent_tokenize(raw)\n\t#sents1=raw.split('\\n' );\n\tsents1=['Boston', 'Buenos Aires', 'Chicago', 'Dallas', 'Mexico City', 'New York', 'Sao Paulo', 'San Francisco', 'Toronto', 'Dubai', 'Frankfurt', 'Johannesburg', 'Limassol', 'London', 'Madrid', 'Milan', 'Moscow', 'Paris', 'Warsaw', 'Beijing', 'Hong Kong', 'Seoul', 'Shanghai', 'Singapore', 'Sydney', 'Tokyo', 'India', 'Giza', 'Tel Aviv', 'Montreal', 'Toronto', 'South San Francisco', 'West Chester', 'Edinburgh', 'Grenoble', 'Port Louis', 'Saint Cloud', 'Melbourne', 'Shenzhen', 'New', 'Hong','Jersey City', 'DIFC', 'DIFC - Dubai','Frankfurt am Main']\n\ti=0\n\tfor t in sents:\n\t\t if(t in sents1):\n\t\t \ti=i+1\n\tg=0\t \t\n\tfor t in sents:\n\t\tif(re.search('.*JOURNALISTS.*', t)): \n\t\t\tg=g+1\n\tif (i==2):\n\t\t#bo.write('------------------------------------\\n'+raw+'\\n'+str(i)+' '+str(j)+'\\n------------------------------------\\n')\n\t\tcount=0\n\t\tk=0\n\t\twhile(k<len(sents)):\n\t\t\tif(sents[k] in sents1):\n\t\t\t#print sents[i]\n\t\t\t#print i\n\t\t\t\tcount=count+1\n\t\t\t\tif(count%2==1):\n\t\t\t\t\ta=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\telse:\n\t\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\t\tj=0\n\t\t\t\t\twhile(j<len(t)):\n\t\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\t\tfoo.close()\n\t\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\telif(i==0 or g==3):\n\t\tk=0\n\t\tpi=0\t#place index if 1 that means a place found\n\t\twhile(k<len(words)):\n\t\t\tif (words[k] in sents1):\n\t\t\t\tpi=pi+1\n\t\t \t\tif(words[k]=='New'):\n\t\t \t\t\twords[k]='New York'\n\t\t \t\tif(words[k]=='Hong'):\n\t\t \t\t\twords[k]='Hong Kong'\n\t\t \t\t\t \t\n\t\t \t\tplace=words[k]\n\t\t \t\t\n\t\t\tk=k+1\n\t\t\n\t\tl=0\n\t\tcount=0\n\t\t\n\t\tif (pi==0): #defalut location is New York\n\t\t\tplace='New York'\n\t\t\t\n\t\ta=[place,sents[0],sents[1],sents[2]]\n\t\twhile(l<len(sents)):\n\t\t\tif(sents[l]=='' and count<1):\n\t\t\t\tcount=count+1\n\t\t\t\tif (pi==0):\n\t\t\t\t\tb=['New York', sents[l+1], sents[l+2],sents[l+3]]\n\t\t\t\telse:\n\t\t\t\t\tb=[place, sents[l+1], sents[l+2],sents[l+3]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\t\t\t\tfoo.close()\n\t\t\t\treturn 
t\n\t\t\tl=l+1\n\t\t\t\n\t\t\t\n\t\t\t\n\telif(i==1 and (sents[0] in sents1)):\n\t\ta=[sents[0],sents[1],sents[2],sents[3]]\n\t\tk=0\n\t\twhile(k<len(sents)):\n\t\t\tif(sents[k] in sents1):\n\t\t\t\t#print sents[i]\n\t\t\t\t#print i\n\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\tfoo.close()\n\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\telif(i==1 and (sents[0] not in sents1)):\n\t\tk=0\n\t\tprint 'yes'\n\t\twhile(k<len(sents)):\n\t\t\tif(sents[k] in sents1):\n\t\t\t\t#print sents[i]\n\t\t\t\t#print i\n\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\ta=[sents[k],sents[0],sents[1],sents[2]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\tfoo.close()\n\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\t\n\tprint t\n\tfoo.close()\n\tbo.close()\n" }, { "alpha_fraction": 0.6793168783187866, "alphanum_fraction": 0.703984797000885, "avg_line_length": 18.518518447875977, "blob_id": "ba140298aeb78ddec152ee6eef3d3688be02223f", "content_id": "e97c388d3d0a79f5bccb36c59bdcb338fc7c8e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 60, "num_lines": 27, "path": "/a_data1.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nimport re, nltk, psycopg2 \nimport dates, a_name\nimport extract_name\nfrom extract_name import extractor\nfrom a_name import analysts\nfrom dates import converter\n\nd=0\nfo = open(\"/home/finance/reports2sql/r_fil_date.txt\", \"rb+\")\nraw=fo.read()\nlocs=nltk.word_tokenize(raw)\n#print locs\n\"\"\"string='a'\nfor t in locs:\n\ttokens=t.split('/')\n\tstring=string+' '+tokens[4]\"\"\"\n\t\nfor t in locs:\n\td=d+extractor(t)\n\n#print d\n#conn.close()\n\"\"\"tok=nltk.word_tokenize(string)\ntok=tok[1:]\n#print tok[0]\nprint tok[tok.index('413000')+1]\"\"\"\n" }, { "alpha_fraction": 0.6892778873443604, "alphanum_fraction": 0.724288821220398, "avg_line_length": 19.772727966308594, "blob_id": "612dc7d23d41eeb43d0e14092c00d60025a3ad28", "content_id": "109b4d4bd13e43f400a362a1afe0b9e4e7a75d4e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "no_license", "max_line_length": 47, "num_lines": 22, "path": "/f_error.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "import os\nimport re, nltk \nimport dates, a_name\nimport extract_name\nfrom extract_name import extractor\nfrom a_name import analysts\nfrom dates import converter\nfo = open(\"/home/finance/r_fil_loc.txt\", \"rb+\")\nraw=fo.read()\nlocs=nltk.word_tokenize(raw)\n#print locs\nstring='a'\nfor t in locs:\n\ttokens=t.split('/')\n\tstring=string+' '+tokens[4]\n\t\ntok=nltk.word_tokenize(string)\ntok=tok[1:]\n#print tok[0]\nprint tok.index('761840')\n\nprint tok[tok.index('372050')+1]\n" }, { "alpha_fraction": 0.5522788166999817, "alphanum_fraction": 0.5891420841217041, "avg_line_length": 25.54804229736328, "blob_id": "65a9771169103103048cdabb932ec5fb14e6414d", "content_id": "1838155e1c7ca3fe4fe0d9396c36b20f6fcc2be1", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7460, "license_type": "no_license", "max_line_length": 551, "num_lines": 281, "path": "/done/analysts_name.py", "repo_name": "vishaljain3991/reports2sql", "src_encoding": "UTF-8", "text": "#==========================\n#PLEASE READ THE COMMENTS\n#==========================\n#This is one of the most important file of the lot. In the file analysts function\n#is defined. With this function we extract analyst names, their designation,\n#departments and positions.\n \nimport os\nimport re, nltk \n\ndef analysts(string):\n\tbo=open(\"/home/finance/reports2sql/types.txt\",\"ab+\")\n\t\n\tfoo=open(string)\n\traw=foo.read()\n\t\n\t#Next, we split analysts_YYYY-MM-YY.txt into multiple lines by using \\n delimiter\n\tsents=raw.split('\\n' );\n\t\n\t#We also tokenize the file into words\n\twords=nltk.word_tokenize(raw); \n\tfoo.close()\n\t\n\t#sents1 contains all the places in which Moody's have offices.\n\tsents1=['Boston', 'Buenos Aires', 'Chicago', 'Dallas', 'Mexico City', 'New York', 'Sao Paulo', 'San Francisco', 'Toronto', 'Dubai', 'Frankfurt', 'Johannesburg', 'Limassol', 'London', 'Madrid', 'Milan', 'Moscow', 'Paris', 'Warsaw', 'Beijing', 'Hong Kong', 'Seoul', 'Shanghai', 'Singapore', 'Sydney', 'Tokyo', 'India', 'Giza', 'Tel Aviv', 'Montreal', 'Toronto', 'South San Francisco', 'West Chester', 'Edinburgh', 'Grenoble', 'Port Louis', 'Saint Cloud', 'Melbourne', 'Shenzhen', 'New', 'Hong','Jersey City', 'DIFC', 'DIFC - Dubai','Frankfurt am Main']\n\t\n\ti=0\n\t\n\t#Firstly we count the no. of times a place has been a token in sents \n\tfor t in sents:\n\t\t if(t in sents1):\n\t\t \ti=i+1\n\t#Secondly we count the number of tokens which have JOURNALISTS mentioned.\n\tg=0\t \t\n\tfor t in sents:\n\t\tif(re.search('.*JOURNALISTS.*', t)): \n\t\t\tg=g+1\n\t#Now comes the most important part of the code. Here we decide which methodology\n\t#to choose for extraxting the features depending on the value of i and g\n\t\n\t#if i=2 and j is free to have any value\n\t#so analysts_YYYY-MM-YY.txt file looks somewhat like this as in the example shown\n\t#below\n\t#--------------------------------\n\t#\tNew York\n\t#\tMichael Levesque\n\t#\tSenior Vice President\n\t#\tCorporate Finance Group\n\t#\tMoody's Investors Service\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\n\t#\tNew York\n\t#\tLenny J. Ajzenman\n\t#\tSenior Vice President\n\t#\tCorporate Finance Group\n\t#\tMoody's Investors Service\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\n\t#\tMoody's Investors Service\n\t#\t250 Greenwich Street\n\t#\tNew York, NY 10007\n\t#\tU.S.A.\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\t#--------------------------------\n\t\n\tif (i==2):\n\t\tcount=0\n\t\tk=0\n\t\twhile(k<len(sents)):\n\t\t\t#Next we search whether any of the tokens is in sents1. 
If we find such \n\t\t\t#token, we immediately know that the succeding sentences are name of the \n\t\t\t#analyst, his designation and department.\n\t\t\tif(sents[k] in sents1):\n\t\t\t\n\t\t\t\tcount=count+1\n\t\t\t\tif(count%2==1):\n\t\t\t\t\t#a contains information for the first analyst.\n\t\t\t\t\ta=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\telse:\n\t\t\t\t\t#b contains information for the second analyst.\n\t\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\t\t\n\t\t\t\t\t#We concatenate a and b to form t that contains \n\t\t\t\t\t#info about both the analyst.\n\t\t\t\t\tt=a+b\n\t\t\t\t\t\n\t\t\t\t\t#In the next while loop, actually find those\n\t\t\t\t\t#entries in t that have apostrophe and remove it. \n\t\t\t\t\t#This is done to ensure that the entries with\n\t\t\t\t\t#apostrophe are actually entered in the database.\n\t\t\t\t\t#If we try to enter the data, POSTGRES throws an\n\t\t\t\t\t#error. I could not find an alternate way to avoid \n\t\t\t\t\t#error.\n\t\t\t\t\tj=0\n\t\t\t\t\twhile(j<len(t)):\n\t\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\t\tfoo.close()\n\t\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\t\t\t\n\t#When i=0 (Ignore g=3). \n\t#For e.g. analysts_YYYY-MM-YY.txt file looks somewhat like these\n\t#------------------------------------------------\n\t#\tMichael Levesque, CFA\t\t\t\n\t#\tSenior Vice President\n\t#\tCorporate Finance Group\n\t#\tMoody's Investors Service, Inc.\n\t#\t250 Greenwich Street\n\t#\tNew York, NY 10007\n\t#\tU.S.A.\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\n\t#\tPeter H. Abdill, CFA\n\t#\tMD - Corporate Finance\n\t#\tCorporate Finance Group\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\n\t#\tReleasing Office:\n\t#\tMoody's Investors Service, Inc.\n\t#\t250 Greenwich Street\n\t#\tNew York, NY 10007\n\t#\tU.S.A.\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\t#--------------------------------------------------\n\t#\tMichael J. Mulvaney\n\t#\tManaging Director\n\t#\tCorporate Finance Group\n\t#\n\t#\tCharles X. 
Tan\n\t#\tVice President - Senior Analyst\n\t#\tCorporate Finance Group\n\t#--------------------------------------------------\n\telif(i==0 or g==3):\n\t\tk=0\n\t\tpi=0\t#place index if 1 that means a place found\n\t\twhile(k<len(words)):\n\t\t\t#We determine whether any of the words that we obtained by tokenization\n\t\t\t#is a location.\n\t\t\tif (words[k] in sents1):\n\t\t\t\tpi=pi+1\n\t\t\t\t#If a word happens to be 'New', then the place is most probably New York\n\t\t \t\tif(words[k]=='New'):\n\t\t \t\t\twords[k]='New York'\n\t\t \t\t#If a word happens to be 'Hong', then the place is most probably Hong Kong\n\t\t \t\tif(words[k]=='Hong'):\n\t\t \t\t\twords[k]='Hong Kong'\n\t\t \t\t\n\t\t \t\t#place variable stores the location that was found\t \t\n\t\t \t\tplace=words[k]\n\t\t \t\t\n\t\t\tk=k+1\n\t\t\n\t\tl=0\n\t\tcount=0\n\t\t\n\t\t#if we still find no word that happens to be one og the locations then the default location\n\t\t#of the analyst is New York (this is our assumption)\n\t\tif (pi==0): \n\t\t\tplace='New York'\n\t\t\n\t\t#a stores the relevant features of first analyst\t\n\t\ta=[place,sents[0],sents[1],sents[2]]\n\t\t\n\t\twhile(l<len(sents)):\n\t\t\tif(sents[l]=='' and count<1):\n\t\t\t\tcount=count+1\n\t\t\t\tif (pi==0):\n\t\t\t\t\tb=['New York', sents[l+1], sents[l+2],sents[l+3]]\n\t\t\t\telse:\n\t\t\t\t\tb=[place, sents[l+1], sents[l+2],sents[l+3]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\t\n\t\t\t\t\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\t\t\t\tfoo.close()\n\t\t\t\treturn t\n\t\t\tl=l+1\n\t\t\t\n\t\t\t\n\t#When i=1 and initial sentence is a location in sents1\n\t#For e.g. analysts_YYYY-MM-YY.txt file looks somewhat like this\n\t#---------------------------------\n\t#\tNew York\n\t#\tPamela Stumpp\n\t#\tManaging Director\n\t#\tCorporate Finance Group\n\t#\tMoody's Investors Service\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\t#\n\t#\tThomas S. Coleman\n\t#\tSenior Vice President\n\t#\tCorporate Finance Group\t\n\t#----------------------------------\t\n\telif(i==1 and (sents[0] in sents1)):\n\t\ta=[sents[0],sents[1],sents[2],sents[3]]\n\t\tk=0\n\t\twhile(k<len(sents)):\n\t\t\tif(sents[k] in sents1):\n\t\t\t\t\n\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\tfoo.close()\n\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\t\t\t\n\t#When i=1 and initial sentence is not a location in sents1\n\t#For e.g. 
analysts_YYYY-MM-YY.txt file looks somewhat like this\n\t#-----------------------------------\n\t#\tMark Gray\n\t#\tManaging Director\n\t#\tCorporate Finance Group\n\t#\n\t#\tNew York\n\t#\tDavid Neuhaus\n\t#\tVP - Senior Credit Officer\n\t#\tCorporate Finance Group\n\t#\tMoody's Investors Service\n\t#\tJOURNALISTS: 212-553-0376\n\t#\tSUBSCRIBERS: 212-553-1653\n\t#-----------------------------------\n\telif(i==1 and (sents[0] not in sents1)):\n\t\tk=0\n\t\tprint 'yes'\n\t\twhile(k<len(sents)):\n\t\t\tif(sents[k] in sents1):\n\t\t\t\tb=[sents[k], sents[k+1], sents[k+2],sents[k+3]]\n\t\t\t\ta=[sents[k],sents[0],sents[1],sents[2]]\n\t\t\t\tt=a+b\n\t\t\t\t\n\t\t\t\tj=0\n\t\t\t\twhile(j<len(t)):\n\t\t\t\t\tif(\"'\" in t[j]):\n\t\t\t\t\t\tt[j]=t[j][:t[j].index(\"'\")]+t[j][t[j].index(\"'\")+1:]\n\t\t\t\t\t\tprint t[j]\n\t\t\t\t\t\n\t\t\t\t\tj=j+1\n\n\t\t\t\t\t\t\n\t\t\t\n\t\t\t\tfoo.close()\n\t\t\t\treturn t\n\t\t\n\t\t\tk=k+1\n\t\n\tprint t\n\tfoo.close()\n\tbo.close()\n" } ]
12
tomithy/GSOC-2012-Demo
https://github.com/tomithy/GSOC-2012-Demo
38ce1d11e090ed1ad42be5c81399599ccc8263fe
09b5abc0a64ad108f1f31e7c09e8e0ad83beb97d
00ab1755c98bc5a80586dacbebd11fb9a6b33ffd
refs/heads/master
2021-01-10T19:25:35.954201
2012-04-01T10:43:42
2012-04-01T10:43:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6665065884590149, "alphanum_fraction": 0.676593005657196, "avg_line_length": 33.51381301879883, "blob_id": "9d4b0fb4a11c1a359a55e3bda914ab9a9fc67e72", "content_id": "b1bfee66918898f686547a01422089e624c73f85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6246, "license_type": "no_license", "max_line_length": 229, "num_lines": 181, "path": "/generateFolderXml.py", "repo_name": "tomithy/GSOC-2012-Demo", "src_encoding": "UTF-8", "text": "import lxml\n\n__author__ = 'Tomithy'\n\n# Written for demostration purposes for GSOC 2012\n\n# This script uses the schema defined by PhyloXML which is available at http://www.phyloxml.org/\n\n# Suggested improvements:\n # 1. Use a dict to store and retrieve ancestor instead of the current recursive functions\n # 2. Change the generate clade method to have no side effects in lieu of the FP style, easier testing?\n\nimport os\nimport sys\nfrom lxml import etree\n\nrootdir = os.getcwd() + \"/Galaxy_scripts\" #\"directory which is rendered to PhyloXML\"\n\n\nOUTFILE_NAME = \"scripts.xml\"\nBRANCH_LENGTH = 5.0 #determines how far/how deep each directory is\n\ndef buildDirXml():\n\n print rootdir\n rootString = \"/\".join(rootdir.split(\"/\")[:-1])\n\n phyloroot = buildGenericPhyloXMLStructure()\n claderoot = phyloroot[1]\n\n folderCount = 0 #folder stats\n fileList = []\n fileSize = 0\n\n for folderDir, subFolders, files in os.walk(rootdir):\n\n folderCount += len(subFolders)\n parentNode = getFolderParentNode(claderoot, rootString, folderDir)\n\n folderName = folderDir.split(\"/\")[-1]\n\n #generating 2 nodes over here because we need one to serve as internal node, with names, and the other to display to the user on the phylogeny tree\n folderInternalNode = generateClade(parentNode, folderName)\n folderActualNode = generateClade(folderInternalNode, \"Folder:\" + folderName, tooltip=\"Folder has no filesize\" )\n\n for file in files:\n\n if file == \".DS_Store\":\n continue\n if file [-3:] == \".py\":\n componentType = \"python\"\n elif file [-3:] == \".sh\":\n componentType = \"bash\"\n else:\n componentType = \"others\"\n f = os.path.join(folderDir,file)\n fileSize = fileSize + os.path.getsize(f)\n fileSize = float(fileSize) / 1024\n tooltipFS = \"Filesize: \" + str(fileSize) + \" kb\"\n fileNode = generateClade(folderInternalNode, file, chartIntensity=fileSize, componentType=componentType,tooltip=tooltipFS )\n\n fileList.append(f)\n print file\n\n\n print etree.tostring(phyloroot, pretty_print=True)\n\n xmlOutFile = open (OUTFILE_NAME, \"wt\")\n\n xmlOutString = etree.tostring(phyloroot)\n xmlOutString = addPhyloXMLroot(xmlOutString)\n\n print xmlOutString\n xmlOutFile.write(xmlOutString)\n xmlOutFile.close()\n\n print(\"Total Size is {0} bytes\".format(fileSize))\n print(\"Total Files\" , len(fileList))\n print(\"Total Folders \", folderCount)\n\n\n# Important: we are guranteed that parent would be created before child, because os.walk's default is walking from top-down\ndef getFolderParentNode(phyloRoot, rootString, folderDir):\n parentNode = phyloRoot\n relativePath = folderDir[len(rootString) + 1:] # using the same property that all subfolders\n parentNodeTagXPathTextList = relativePath.split(\"/\")[:-1]\n# print parentNodeTagXPathTextList\n for text in parentNodeTagXPathTextList:\n XpathExpression = 'clade/name[text()=\"' + text + '\"]/..'\n# print XpathExpression\n parentNodeList = parentNode.xpath(XpathExpression)\n# print \"Parent Node list\", parentNodeList\n if 
len(parentNodeList) != 0 :\n parentNode = parentNodeList[0]\n return parentNode\n\n\n\ndef addPhyloXMLroot(xmlString):\n #remove newline from xmlString\n xmlString = '<phyloxml xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.phyloxml.org http://www.phyloxml.org/1.10/phyloxml.xsd\" xmlns=\"http://www.phyloxml.org\">' + xmlString + \"</phyloxml>\"\n return xmlString\n\ndef buildGenericPhyloXMLStructure(rooted=\"false\"):\n\n root = etree.Element(\"phylogeny\", rooted=rooted) # We use the unique child as root first since the true root contains attribute that cannot be added by lxml\n root = addRenderAttributes(root)\n etree.SubElement(root, \"clade\")\n\n return root\n\n\n\ndef addRenderAttributes(root):\n\n render = etree.SubElement(root, \"render\")\n\n parameters = etree.SubElement(render, \"parameters\") # now we add parameters to it\n circular = etree.SubElement(parameters, \"circular\")\n bufferRadius = etree.SubElement(circular, \"bufferRadius\")\n bufferRadius.text = \"0.5\"\n\n rectangular = etree.SubElement(parameters, \"rectangular\")\n alignRight = etree.SubElement(rectangular, \"alignRight\")\n alignRight.text = \"1\"\n bufferX = etree.SubElement(rectangular, \"bufferX\")\n bufferX.text = \"300\"\n\n charts = etree.SubElement(render, \"charts\") #Adding chart display options\n component = etree.SubElement(charts, \"content\", type=\"bar\", fill=\"#666\", width=\"0.2\")\n component = etree.SubElement(charts, \"component\", type=\"binary\", thickness=\"10\")\n\n\n\n styles = etree.SubElement(render, \"styles\") # Adding Styles\n styles.append (generateStyle(\"bash\", \"#6633FF\", \"#DDD\"))\n styles.append (generateStyle(\"python\", \"#FF6600\", \"#DDD\")) #to highligh python code\n styles.append (generateStyle(\"others\", \"#d7e3bc\", \"#DDD\")) #to highligh others\n etree.SubElement(styles, \"barChart\", fill='#000')\n\n return root\n\n\ndef generateStyle( tag=\"\", fill=\"#000\", stroke=\"#FFF\"):\n style = etree.Element(tag, fill=fill, stroke=stroke )\n print style\n return style\n\ndef generateClade(parent, name=\"\", branchLen=BRANCH_LENGTH, tooltip=\"\", uri=\"\", componentType=None, bg=\"\", chartIntensity=0 ):\n\n node = etree.SubElement(parent, \"clade\")\n\n if name is not \"\":\n nameNode = etree.SubElement(node, \"name\")\n nameNode.text = name\n nameNode.set(\"bgStyle\", bg)\n\n branchLenNode = etree.SubElement(node, \"branch_length\")\n branchLenNode.text = str(branchLen)\n\n #adding annotation\n annotationNode = etree.SubElement(node, \"annotation\")\n descNode = etree.SubElement(annotationNode, \"desc\")\n descNode.text = tooltip\n# uriNode = etree.SubElement(annotationNode, \"uri\")\n# uriNode.text = uri\n\n\n chart = etree.SubElement(node, \"chart\")\n content = etree.SubElement(chart, \"content\")\n content.text = str(chartIntensity)\n component = etree.SubElement(chart, \"component\")\n component.text = componentType\n\n return node\n\n\n\nif __name__ == '__main__':\n# buildXml()\n buildDirXml()" }, { "alpha_fraction": 0.6442516446113586, "alphanum_fraction": 0.6626898050308228, "avg_line_length": 26.909090042114258, "blob_id": "f5bab6780a5ba489fea477073aa36b947ef21227", "content_id": "a38aebe8a6573b062635f0825fde61f6514cbb41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 66, "num_lines": 33, "path": "/Archive/PhyloXMLGenerator.py", "repo_name": "tomithy/GSOC-2012-Demo", "src_encoding": "UTF-8", "text": "from 
lxml import etree\n\ndef buildXml():\n root = etree.Element(\"root\", interesting=\"totally\")\n root.append( etree.Element(\"child1\") )\n\n etree.SubElement(root, \"child\").text = \"Child 1\"\n etree.SubElement(root, \"child\").text = \"Child 2\"\n etree.SubElement(root, \"another\").text = \"Child 3\"\n\n # root.insert(0, etree.Element(\"Child0\"))\n child2 = root[1]\n\n\n etree.SubElement(child2, \"Child0ofchild2\").text = \"HelloWorld\"\n child0ofChild2 = child2[0]\n\n etree.SubElement(root, \"Inbrackets\").text = \"Here we go\"\n\n anotherElement = etree.Element(\"AppedTest\")\n anotherElement.text = \"Another Text\"\n root[1][0].append(anotherElement)\n\n # root.text = \"TEXT\"\n\n for child in root:\n print child.tag\n\n print etree.tostring(root, pretty_print=True)\n\n# creates a folder clade and adds it to the passed it parent\ndef addFolderClade(parent, foldername, uri=\"\", tooltip=\"\"):\n pass\n\n" } ]
2
ajackal/arctic-swallow
https://github.com/ajackal/arctic-swallow
309d5efa25742e5e61d93398470162a661786afc
b4ea974d3c716d0d7e3cbbeeefeb1a9aa6eefe85
f2ba0f176d3bdab9a0e9c4728ec9013fa0041a9c
refs/heads/master
2021-09-19T09:28:21.576163
2017-10-30T20:16:16
2017-10-30T20:16:16
108,573,073
1
1
null
null
null
null
null
[ { "alpha_fraction": 0.5369595289230347, "alphanum_fraction": 0.5536959767341614, "avg_line_length": 27.719999313354492, "blob_id": "3c75839b7a0ce7d22b1fd936cd921c7c0d999133", "content_id": "0a1ff204136765e12defb58788b9a7e695228faa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 717, "license_type": "no_license", "max_line_length": 85, "num_lines": 25, "path": "/ipt_config.sh", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nsudo iptables -t nat -F\n\nvalue=$(<ports.txt)\nfor i in $value; do\n if [ $i -le 1024 ] && [ $i -ge 100 ]; then\n p=8$i;\n echo $p;\n elif [ $i -le 99 ]; then\n p=80$i;\n echo $p;\n else\n p=$i\n fi\n if [ $p -eq $i ]; then\n echo \"[!] Non-privelaged port, no forwarding necessary \" $p\n else\n sudo iptables -t nat -A PREROUTING -p tcp --dport $i -j REDIRECT --to-port $p\n sudo iptables -t nat -A OUTPUT -p tcp --dport $i -j REDIRECT --to-port $p\n fi\n# sudo iptables -t nat -A PREROUTING -p tcp --dport $i -j REDIRECT --to-port $p\n# sudo iptables -t nat -A OUTPUT -p tcp --dport $i -j REDIRECT --to-port $p\ndone\nsudo iptables -t nat -S" }, { "alpha_fraction": 0.6683354377746582, "alphanum_fraction": 0.694618284702301, "avg_line_length": 20.026315689086914, "blob_id": "cd2d0f213e49b23124aefe06bcf46a33e064e601", "content_id": "3de59b237c7076e5ab9f7a497c9dfefae1230307", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 57, "num_lines": 38, "path": "/tests/client.py", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "import socket\nimport sys\nfrom itertools import product\n\n# builds key list\ncharacters_to_use = \"0123456789\"\nkeys_to_try = product(characters_to_use, repeat=2)\n\nfor key_pair in keys_to_try:\n key = key_pair[0] + key_pair[1]\n # print key\n\n# define ip address and port to use\nip_addr = 'localhost'\ntcp_port = 80\n\nbuffer_size = 1024\nmessage_to_server = \"bite my shiny metal @ss\"\n\n# opens socket\ntry:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nexcept Exception as e:\n print e\n\n# connects socket to ip address and port defined above\ns.connect((ip_addr, tcp_port))\n\ntry:\n s.send(message_to_server)\n print \"[*] sending: {0}\".format(message_to_server)\nexcept socket.error as e:\n print e\n\ndata = s.recv(buffer_size)\ns.close()\n\nprint \"[*] Response from server: {0}\".format(data)\n" }, { "alpha_fraction": 0.6485260725021362, "alphanum_fraction": 0.6746031641960144, "avg_line_length": 26.5625, "blob_id": "a090a8fc851e41093a333553ea314fafbf8e140b", "content_id": "6028f8ebd58058735824f2de7407ed41f92070b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "no_license", "max_line_length": 52, "num_lines": 32, "path": "/tests/read_smb_header_test.py", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "from scapy.all import *\nfrom scapy.utils import PcapReader\n\n# SMB HEADER\n# Server Component: SMB\n# SMB Command: Negotiate Protocol (0x72)\nsmb_header = \"0xff0x530x4d0x420x72\"\n\n# SMB Response: Win10 Home\n# File to read binary from:\nsmb_response_file = \"pcaps\\\\smb_response_win10\"\nwith open(smb_response_file, 'rb') as f:\n # Set variable with SMB response\n smb_response_win10_home = f.read()\n\n# Get DATA from socket\nwith open(\"pcaps\\\\nmap_smb_scan.pcapng\", 'rb') as p:\n raw = 
p.read()\n pcap_hex = \"\"\n for i in raw:\n read_pcap_hex = hex(ord(i))\n pcap_hex += read_pcap_hex\n # print read_pcap_hex\n\n# Check DATA for SMB Header in Hex\nif smb_header in pcap_hex:\n # Send response if Header is found.\n # self.request.sendall(smb_response_win10_home)\n print \"SMB Header detected.\"\n print \"Sending SMB response.\"\nelse:\n print \"Nothing found!\"\n" }, { "alpha_fraction": 0.5576869249343872, "alphanum_fraction": 0.5738895535469055, "avg_line_length": 47.37387466430664, "blob_id": "93d9c8ccd85acbb876b6d59044611d43e499fa3c", "content_id": "6002ba7fe62f0379d09b39c912c34ed06028a036", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10739, "license_type": "no_license", "max_line_length": 119, "num_lines": 222, "path": "/handlers.py", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "import SocketServer\nfrom datetime import datetime\nimport binascii\n\nBUFFER_SIZE = 1024\n\n\nclass SuperHandler(SocketServer.StreamRequestHandler):\n \"\"\" Defines a super class that has the logging events and sends the correct response file. \"\"\"\n def write_event_log_event(self, event):\n \"\"\" Writes all events to 'event.log' with date & time. \"\"\"\n log_time = str(datetime.now())\n print event\n log_file = \"event.log\"\n with open(log_file, 'a') as event_log:\n event_log.write(log_time + event + \"\\n\")\n\n def write_error_log_event(self, error):\n \"\"\" Writes all errors to 'error.log' with date & time. \"\"\"\n log_time = str(datetime.now())\n print error\n log_file = \"error.log\"\n with open(log_file, 'a') as error_log:\n error_log.write(log_time + error + \"\\n\")\n\n def send_response(self, response_file):\n \"\"\" Send Response\n 1. opens the correct response_file\n 2. reads binary to buffer\n 3. sends the response to the client\n \"\"\"\n with open(response_file, 'rb') as pkt_capture:\n response = pkt_capture.read()\n self.request.sendall(response)\n print \"[*] Response packet sent.\"\n\n\nclass TCPEchoHandler(SuperHandler):\n \"\"\" TCP Echo Handler listens on any port not previously listed.\n It simply echos any data that it receives back to the client.\n \"\"\"\n\n def handle(self):\n try:\n self.DATA = self.request.recv(BUFFER_SIZE).strip()\n event = \"[*] {0} wrote: {1}\".format(self.client_address[0], self.DATA)\n self.write_event_log_event(event)\n print event\n self.request.sendall(self.DATA)\n except Exception as error:\n log_error = \"[!] Error receiving data> {0} : {1}\".format(self.client_address[0], error)\n print log_error\n self.write_error_log_event(str(log_error))\n\n\nclass TelnetHandler(SuperHandler):\n \"\"\" Telnet Handler listens on port 8023 for telnet requests & mimics an XP telnet service. \"\"\"\n def handle(self):\n telnet_xp_response_bin = \"pcaps/telnet_xp_response\"\n try:\n self.DATA = self.request.recv(BUFFER_SIZE).strip()\n event = \"[*] {0} wrote over Port 23: {1}\".format(self.client_address[0], self.DATA)\n self.write_event_log_event(event)\n response_file = telnet_xp_response_bin\n self.send_response(response_file)\n # self.request.sendall(\"login: \")\n except Exception as error:\n log_error = \"[!] Error receiving data> {0} : {1}\".format(self.client_address[0], error)\n print log_error\n self.write_error_log_event(str(log_error))\n\n\nclass NetBiosHandler(SuperHandler):\n \"\"\" NetBios Handler listens for and sends reponses for NetBios protocol. 
\"\"\"\n def handle(self):\n netbios_error_bin = \"pcaps/netbios_error\"\n try:\n self.DATA = self.request.recv(BUFFER_SIZE).strip()\n event = \"[*] {0} wrote over Port 139: {1}\".format(self.client_address[0], self.DATA)\n self.write_event_log_event(event)\n response_file = netbios_error_bin\n self.send_response(response_file)\n except Exception as error:\n log_error = \"[!] Error receiving data> {0} : {1}\".format(self.client_address[0], error)\n print log_error\n self.write_error_log_event(str(log_error))\n\n\nclass MsrpcHandler(SuperHandler):\n \"\"\" MSRPC Handler handles any port defined in the MSRPC port list \"\"\"\n def handle(self):\n msrpc_error_bin = \"pcaps/msrpc_error\"\n try:\n self.DATA = self.request.recv(BUFFER_SIZE).strip()\n event = \"[*] {0} wrote over Port: {1}\".format(self.client_address[0], self.DATA)\n self.write_event_log_event(event)\n response_file = msrpc_error_bin\n self.send_response(response_file)\n except Exception as error:\n log_error = \"[!] Error receiving data> {0} : {1}\".format(self.client_address[0], error)\n print log_error\n self.write_error_log_event(str(log_error))\n\n\nclass SMBHandler(SuperHandler):\n \"\"\" SMB Handler binds to port 8445 for to run unprivileged. \"\"\"\n\n def check_smb_header(self, pkt_hex):\n \"\"\" SMB HEADER\n Server Component: SMB\n SMB Command: Negotiate Protocol (0x72)\n SMB Negotiate Request NTLM 0.12\n Session Setup: NT STATUS_SUCCESS\n Session Setup: NT STATUS_ACCOUNT_DISABLED\n Session Setup: NTLMSSP\n Session close: NT STATUS_SUCCESS\n \"\"\"\n smb_header = {\"header\": \"\\xff\\x53\\x4d\\x42\\x72\",\n \"negotiate_ntlm\": \"\\x02\\x4e\\x54\\x20\\x4c\\x4d\\x20\\x30\\x2e\\x31\\x32\\x00\",\n \"session_setup\": \"\\xff\\x53\\x4d\\x42\\x73\\x00\\x00\\x00\\x00\",\n \"account_disabled\": \"\\xff\\x53\\x4d\\x42\\x73\\x72\\x00\\x00\\xc0\",\n \"negotiate_ntlmssp\": \"\\x4e\\x54\\x4c\\x4d\\x53\\x53\\x50\\x00\",\n \"session_close\": \"\\xff\\x53\\x4d\\x42\\x74\\x00\\x00\\x00\\x00\"\n }\n # TODO: Clean up the SMB request/response method.\n # SMB Header NMAP request all Dialects:\n smb_nmap_all_dialects = \"pcaps/smb_nmap_all_dialects\"\n with open(smb_nmap_all_dialects, 'rb') as pkt_capture:\n smb_nmap_all_dialects_bytes = pkt_capture.read()\n pkt_hex_nmap_dialects = \"\"\n for i in smb_nmap_all_dialects_bytes:\n pkt_hex = binascii.hexlify(i)\n pkt_hex_nmap_dialects += \"\\\\x\" + pkt_hex\n # SMB Session Setup andX Request: \\guest\n smb_nmap_setup_andx = \"pcaps/smb_nmap_guest_connect\"\n with open(smb_nmap_setup_andx, 'rb') as pkt_capture:\n smb_nmap_guest_connect_bytes = pkt_capture.read()\n pkt_hex_nmap_guest_connect = \"\"\n for i in smb_nmap_guest_connect_bytes:\n pkt_hex = binascii.hexlify(i)\n pkt_hex_nmap_guest_connect += \"\\\\x\" + pkt_hex\n # SMB Response: Win10 Home\n # File to read binary from:\n smb_response = {\"negotiate_response\": \"pcaps/smb_response_win10\",\n \"negotiate_ntlm\": \"pcaps/smb_negotiate_ntlm_workgroup\",\n \"session_startup\": \"pcaps/smb_session_response_win10\",\n \"guest_connect\": \"pcaps/smb_nmap_guest_connect\",\n \"account_disabled\": \"pcaps/smb_account_disabled_response_win10\",\n \"negotiate_ntlm_win10\": \"pcaps/smb_ntlmssp_response_win10\",\n \"session_close\": \"pcaps/smb_session_close_response\"\n }\n # smb_negotiate_ntlm_response = \"pcaps/smb_ntlm_response_win10\"\n try:\n if pkt_hex.find(smb_header['header']):\n if pkt_hex.find(pkt_hex_nmap_dialects):\n event = \"[*] SMB Header - NMAP request for all dialects from {0}\".format(self.client_address[0])\n 
self.write_event_log_event(event)\n self.send_response(smb_response['negotiate_response'])\n if pkt_hex.find(smb_header['negotiate_ntlm']):\n event = \"[*] SMB Header - Negotiate Session NTLM detected from {0}\".format(self.client_address[0])\n self.write_event_log_event(event)\n self.send_response(smb_response['negotiate_ntlm'])\n else:\n # Send response if Header is found.\n event = \"[*] SMB Header - Negotiate Session was detected from {0}\".format(self.client_address[0])\n self.write_event_log_event(event)\n self.send_response(smb_response['negotiate_response'])\n if pkt_hex.find(smb_header['session_setup']):\n if pkt_hex.find(pkt_hex_nmap_guest_connect):\n event = \"[*] SMB Header - Session Setup and X detected from {0}\".format(self.client_address[0])\n self.write_event_log_event(event)\n self.send_response(smb_response['guest_connect'])\n if pkt_hex.find(smb_header['negotiate_ntlmssp']):\n event = \"[*] SMB Header - Session Startup NTLMSSP detected from {0}\".format(self.client_address[0])\n self.write_event_log_event(event)\n self.send_response(smb_response['negotiate_ntlmssp'])\n else:\n # Send account disabled response to start up request.\n event = \"[*] SMB Header - Session Setup detected from {0}\".format(self.client_address[0])\n self.write_event_log_event(event)\n self.send_response(smb_response['session_startup'])\n if pkt_hex.find(smb_header['account_disabled']):\n # Send LANMAN info to requester\n event = \"[*] SMB Header - LANMAN information requested from {0}\".format(self.client_address[0])\n print event\n self.write_event_log_event(event)\n self.send_response(smb_response['account_disabled'])\n if pkt_hex.find(smb_header['session_close']):\n # Send session close.\n event = \"[*] SMB Header - Session Close detected from {0}\".format(self.client_address[0])\n print event\n self.write_event_log_event(event)\n self.send_response(smb_response['session_close'])\n else:\n self.request.sendall(self.DATA)\n except Exception as error:\n log_error = \"[!] Error receiving data from socket with {0} : {1}\".format(self.client_address[0], error)\n self.write_error_log_event(str(log_error))\n\n def handle(self):\n \"\"\" Main methods in the SMB Handler\n 1. reads the inbound request from the client\n 2. determines the SMB request type by reading the SMB header\n 3. defines the appropriate response and file\n 4. calls the reponse method\n \"\"\"\n # Get DATA from socket\n try:\n self.DATA = self.request.recv(BUFFER_SIZE).strip()\n # Convert DATA to hex\n pkt_hex = \"\"\n for i in self.DATA:\n # Converts each byte to hex\n pkt_hex_byte = binascii.hexlify(i)\n # Constructs hex bytes together in one string\n pkt_hex += \"\\\\x\" + pkt_hex_byte\n # Check DATA for SMB Header in Hex\n self.check_smb_header(pkt_hex)\n except Exception as error:\n log_error = \"[!] 
Error receiving data from socket \\\n with {0} : {1}\".format(self.client_address[0], error)\n self.write_error_log_event(str(log_error))\n" }, { "alpha_fraction": 0.5072992444038391, "alphanum_fraction": 0.5510948896408081, "avg_line_length": 12.095237731933594, "blob_id": "330316af195e9a5c17ab88078a783725a334d352", "content_id": "23f8484ac714c67aaa1cedceeda0c686db64475c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 274, "license_type": "no_license", "max_line_length": 30, "num_lines": 21, "path": "/tests/bash_script_test.sh", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\n\nARGV0=$1\nfor i in $ARGV0; do\n echo $i\n new_port=8$i\n echo $new_port\ndone\n\necho \"doing this to $1 and $2\"\n\nvalue=$(<ports.txt)\necho $value\nfor i in $value; do\n if [ $i -le 100 ]; then\n p=80$i;\n else\n p=8$i;\n fi\necho $i $p\ndone" }, { "alpha_fraction": 0.5533005595207214, "alphanum_fraction": 0.5701106786727905, "avg_line_length": 35.13333511352539, "blob_id": "422799a90cad905d6caf2dd42a47a8064e282fdb", "content_id": "2d9e9341312bae4ec64b6dabed674b189a60fae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4878, "license_type": "no_license", "max_line_length": 114, "num_lines": 135, "path": "/arcticswallow.py", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "import sys\nfrom threading import Thread\nfrom termcolor import colored\nimport colorama\nfrom handlers import *\n\n\nclass HoneyPotHandler(Thread):\n \"\"\" HoneyPotHandler\n 1. reads the port as an argument\n 2. determines which handler to run based of the port\n 3. determines what address to listen on\n 4. starts the SocketServer with the appropriate handler.\n 5. logs the events\n 6. tells the server to live forever (run indefinitely)\n \"\"\"\n def __init__(self, port):\n Thread.__init__(self)\n self.port = port\n self.msrpc_ports = ['8135', '49152', '49153', '49154', '49155']\n\n def run(self):\n if self.port == '8445':\n handler_type = SMBHandler\n elif self.port == '8023':\n handler_type = TelnetHandler\n elif self.port in self.msrpc_ports:\n handler_type = MsrpcHandler\n elif self.port == '8139':\n handler_type = NetBiosHandler\n else:\n handler_type = TCPEchoHandler\n try:\n if int(self.port) < 1024:\n listening_host = 'localhost'\n else:\n try:\n listening_host = sys.argv[2]\n except IndexError:\n listening_host = 'localhost'\n server = SocketServer.TCPServer((listening_host, int(self.port)), handler_type)\n event = \"[*] {0} handler started on {1}:{2}\".format(str(handler_type), listening_host, self.port)\n self.write_event_log_event(event)\n server.serve_forever()\n except Exception as error:\n error = \"[!] There was an error establishing a handler because {0}\".format(error)\n self.write_error_log_event(error)\n\n\n# TODO: fix broken logging method dependency.\nclass HoneyPot:\n def __init__(self):\n self.ports = []\n\n def build_ports_list(self):\n \"\"\"Build Ports List\n 1. reads input file given\n 2. writes the ports to a list\n 3. 
logs the events\n \"\"\"\n ports_list_file = str(sys.argv[1])\n\n with open(ports_list_file, 'r') as i:\n self.ports = i.readlines()\n self.ports = [x.strip('\\n') for x in self.ports]\n event = \"[*] will start listening on: {0}\".format(self.ports)\n self.write_event_log_event(event)\n return self.ports\n\n def build_pot(self):\n \"\"\" Build Pot\n This function builds the threads that will make the pot.\n It will also adjust privileged ports to unprivileged ports.\n Starts and Joins threads.\n \"\"\"\n thread_list = []\n for port in self.ports:\n if int(port) < 1024:\n if int(port) < 100:\n port = \"80\" + port\n else:\n port = \"8\" + port\n event = \"[*] Starting handler on port {0}\".format(port)\n self.write_event_log_event(event)\n new_thread = HoneyPotHandler(port)\n thread_list.append(new_thread)\n for thread in thread_list:\n thread.start()\n for thread in thread_list:\n thread.join()\n\n def print_usage(self):\n \"\"\" Prints the program usage when:\n 1. No argument for the ports list is given.\n 2. User inputs \"?\" option.\n \"\"\"\n print \"arctic-swallow.py {0} {1}\".format(colored('<ports.txt>', 'red'), colored('<IP-address>', 'yellow'))\n print \"\\t{0} = text file with ports listed, one per line\".format(colored('<ports.txt>', 'red'))\n print \"\\t[!] Don't forget to set up port forwarding with {0}\".format(colored(\"'ipt_config.sh'\", 'green'))\n print \"\\t[!] Don't forget to set up {0} for full packet capture!\".format(colored(\"TCPDUMP\", 'yellow'))\n # don't think we need this options.\n # iptables will forward from all IPs to localhost with one honeypot running\n # print \"\\t{0} = IP Address to listen on for \\\n # non-privileged ports.\".format(colored('<IP-address>', 'yellow'))\n exit(0)\n\n\ndef main():\n \"\"\" Main Function\n 1. initiates colorama for terminal colors on Windows.\n 2. checks for proper inputs, displays help menu.\n 3. runs function to build the ports list.\n a. modifies any privileged port to unprivileged port.\n 4. 
builds and runs the honey pot.\n \"\"\"\n colorama.init()\n hp = HoneyPot()\n try:\n sys.argv[1]\n except IndexError:\n hp.print_usage()\n if sys.argv[1] == \"?\":\n hp.print_usage()\n else:\n print \"[*] Don't forget to:\"\n print colored(\"[!]Setup port forwarding with 'ipt_setup.sh'!!\", 'green')\n print colored(\"[!]Setup full packet capture with 'tcpdump'!!\", 'yellow')\n print \"[*] Building ports list for handlers.\"\n hp.build_ports_list()\n print \"[*] Starting handlers.\"\n hp.build_pot()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5675265789031982, "alphanum_fraction": 0.600910484790802, "avg_line_length": 20.96666717529297, "blob_id": "c5f95bfeecd60f03e961432eedfa79abe8993bdd", "content_id": "8665fc67708a407438fa0a31797fb58be2c4311d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 47, "num_lines": 30, "path": "/tests/hex_test.py", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "import binascii\n\nsmb_header_negotiate = \"\\xff\\x53\\x4d\\x42\\x72\"\n# smb_header_negotiate = \"0xff0x530x4d0x420x72\"\n\nfile = 'pcaps/smb_response_win10'\nwith open(file, 'rb') as f:\n x = f.read()\n # z = binascii.hexlify(x)\n z = \"\"\n for i in x:\n r = binascii.hexlify(i)\n # r = hex(ord(i))\n z += \"\\\\x\" + r\n # z += r\n print z\n\n# print type(smb_header_negotiate)\nprint type(z)\n\n# if smb_header_negotiate in z:\n# print \"SMB Header Found!\"\n# else:\n# print \"Nothing found!\"\n\n# smb_found = z.find(smb_header_negotiate)\nif z.find(smb_header_negotiate) is not 0:\n print \"SMB Header found!\"\nelse:\n print \"Nothing found!\"\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 21.5, "blob_id": "f5ce37613cccf77cbe90b77be0851f4494a1a188", "content_id": "bd949b80719a65786dc104f89b4674dd0bc90f26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 45, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "ajackal/arctic-swallow", "src_encoding": "UTF-8", "text": "# arctic-swallow\na low interaction honeypot.\n" } ]
8
fnsne/play_book_of_change
https://github.com/fnsne/play_book_of_change
fa649dc7f397eefb567de4c9589b8418e1416240
8d8e66ad8b3c0008c90ee5740fa30c8c89282d36
4bef2f30103e8284e5c42ee72b38f21464518c23
refs/heads/master
2021-01-15T22:08:31.521479
2017-08-10T05:13:59
2017-08-10T05:13:59
99,883,930
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.42793241143226624, "alphanum_fraction": 0.4597415626049042, "avg_line_length": 19.742267608642578, "blob_id": "77862258cd41f8a61d53075a132586f2954f294f", "content_id": "3850eadfb422461bd2d7bbe3bf41ef2ed2329c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2052, "license_type": "no_license", "max_line_length": 49, "num_lines": 97, "path": "/play.py", "repo_name": "fnsne/play_book_of_change", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom random import randint\n\n\ndef divide2(total):\n #print \"\\n--------分二\"\n left = randint(1,total-1)\n right = total - left\n #print \"left : \", left\n #print \"right : \", right\n return [left, right]\n\ndef put1(left,right):\n #print \"\\n--------掛一\"\n right -= 1\n #print \"left :\",left, \"right :\", right\n return [left, right]\n\ndef mod4(left, right):\n #print \"\\n--------喋四\"\n left_rem = left % 4\n if left_rem == 0:\n left_rem = 4\n right_rem = right % 4\n if right_rem == 0:\n right_rem = 4\n #print \"左餘 :\",left_rem\n #print \"右餘 :\",right_rem\n return [left_rem,right_rem]\n\ndef detect_ood(rem):\n #print \"\\n--------歸奇\"\n if (rem/4) == 1:\n return 1\n else:\n return 0\n\ndef print_ood_even(num):\n if num%2 == 1:\n print \"奇\",\n else:\n print \"偶\",\n\ndef get_yao(three_nums):\n odd = 0\n even = 0\n for i in range(0, 3):\n if three_nums[i] == 1:\n odd += 1\n else:\n even +=1\n if odd == 3:\n return 2\n if odd == 2:\n return -1\n if odd == 1:\n return 1\n if odd == 0:\n return -2\ndef showYao(yao):\n if yao == -2:\n print \"變\", \"- -\"\n if yao == -1:\n print \" \", \"- -\"\n if yao == 1:\n print \" \", \"---\"\n if yao == 2:\n print \"變\", \"---\"\ndef showGua(gua):\n for i in range(0,6):\n showYao(gua[i])\n\ndef main():\n total = 50\n total -= 1\n taiji = 1\n temp = [0,0,0]\n hexagram = [0,0,0,0,0,0]\n\n for j in range(0,6):\n total = 49\n for i in range(0,3):\n left, right = divide2(total)\n left, right = put1(left,right)\n left_rem, right_rem = mod4(left,right)\n rem = left_rem + right_rem\n total -= rem\n temp[i] = detect_ood(rem)\n for i in range(0,3):\n print_ood_even(temp[i])\n print \"\\n\"\n hexagram[j] = get_yao(temp)\n\n print \"得卦 :\"\n showGua(hexagram)\nif __name__ == \"__main__\":\n main()\n" } ]
1
HerdOfBears/HHT_EWS
https://github.com/HerdOfBears/HHT_EWS
82ef50e49dd73f2787764c80c0a892fc2e6a4fbc
2945b1edb3da77f67e85bd2e331b0434cbfa1509
99537890053808aab766126ebc6b8e160fb5fdf8
refs/heads/master
2020-06-22T05:03:07.276873
2019-07-24T21:22:11
2019-07-24T21:22:11
197,640,155
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6098628640174866, "alphanum_fraction": 0.6308724880218506, "avg_line_length": 25.167938232421875, "blob_id": "1637499372220b12fffab997b299c90c1033ec2f", "content_id": "103e41634b7d040381e058df1ec0a9bade512894", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3431, "license_type": "no_license", "max_line_length": 86, "num_lines": 131, "path": "/fold_sim.py", "repo_name": "HerdOfBears/HHT_EWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 16:41:47 2018\n\n@author: Thomas Bury\n\nCode to simulate the RM model and compute EWS\n\n\"\"\"\n\n# import python libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nimport os\n\n# import EWS function\nimport sys\nimport time\n\n#---------------------\n# Directory for data output\n#–----------------------\n\n# Name of directory within data_export\ndir_name = 'fold_ews_temp'\n\n# if not os.path.exists('data_export/'+dir_name):\n # os.makedirs('data_export/'+dir_name)\n\n\n#--------------------------------\n# Global parameters\n#–-----------------------------\n\n\n# Simulation parameters\ndt = 0.01\nt0 = 0\ntmax = 10**4#500\ntburn = 100 # burn-in period\nnumSims = 1\nseed = 2 # random number generation seed\n\n# EWS parameters\ndt2 = 1 # spacing between time-series for EWS computation\nrw = 0.4 # rolling window\nbw = 0.1 # bandwidth\nlags = [1,2,3] # autocorrelation lag times\news = ['var','ac','sd','cv','skew','kurt','smax','aic','cf'] # EWS to compute\nham_length = 40 # number of data points in Hamming window\nham_offset = 0.5 # proportion of Hamming window to offset by upon each iteration\npspec_roll_offset = 20 # offset for rolling window when doing spectrum metrics\n\n\n#----------------------------------\n# Simulate many (transient) realisations\n#----------------------------------\n\n# Model (bound system by using a piecewise definition)\ndef de_fun(x,u):\n output = -u - x**2 if x > -1.5 else 0\n return output\n \n# Model parameters\nsigma = 0.1 # noise intensity\nbl = -1 # control parameter initial value\nbh = 0.2 # control parameter final value\nbcrit = 0 # bifurcation point (computed in Mathematica)\nx0 = np.sqrt(-bl) # intial condition (equilibrium value)\n\n\n# Initialise arrays to store single time-series data\nt = np.arange(t0,tmax,dt)\nx = np.zeros(len(t))\ny = np.zeros(len(t))\n\n# Set up bifurcation parameter b, that increases linearly in time from bl to bh\nb = pd.Series(np.linspace(bl,bh,len(t)),index=t)\n# Time at which bifurcation occurs\ntbif = b[b > bcrit].index[1]\n\n\n## Implement Euler Maryuyama for stocahstic simulation\n\n# Set seed\nnp.random.seed(seed)\n\n# Initialise a list to collect trajectories\nlist_traj_append = []\nprint(len(t))\nt_0 = time.time()\n\n# loop over simulations\nprint('\\nBegin simulations \\n')\nfor j in range(numSims):\n \n \n # Create brownian increments (s.d. 
sqrt(dt))\n dW_x_burn = np.random.normal(loc=0, scale=sigma*np.sqrt(dt), size = int(tburn/dt))\n dW_x = np.random.normal(loc=0, scale=sigma*np.sqrt(dt), size = len(t))\n \n # Run burn-in period on x0\n for i in range(int(tburn/dt)):\n x0 = x0 + de_fun(x0,bl)*dt + dW_x_burn[i]\n \n # Initial condition post burn-in period\n x[0]=x0\n \n # Run simulation\n for i in range(len(t)-1):\n x[i+1] = x[i] + de_fun(x[i],b.iloc[i])*dt + dW_x[i]\n \n # Store series data in a temporary DataFrame\n data = {'Realisation number': (j+1)*np.ones(len(t)),\n 'Time': t,\n 'x': x}\n df_temp = pd.DataFrame(data)\n # Append to list\n list_traj_append.append(df_temp)\n \n print('Simulation '+str(j+1)+' complete')\n\n# Concatenate DataFrame from each realisation\ndf_traj = pd.concat(list_traj_append)\ndf_traj.set_index(['Realisation number','Time'], inplace=True)\n\nprint(time.time() - t_0)\ndf_traj.to_csv(\"/home/jmenard/HHT_EWS/traj_data_fold_long.csv\")" }, { "alpha_fraction": 0.5951448678970337, "alphanum_fraction": 0.6175932884216309, "avg_line_length": 25.61111068725586, "blob_id": "79e67521551851b457fe3621013f7ba5b9c86270", "content_id": "695bcc322c1471b2eb0e312829fb6bf62f2376d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3835, "license_type": "no_license", "max_line_length": 88, "num_lines": 144, "path": "/hopf_sim.py", "repo_name": "HerdOfBears/HHT_EWS", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 20 16:41:47 2018\n\n@author: Thomas Bury\n\nCode to simulate the RM model and compute EWS\n\n\"\"\"\n\n# import python libraries\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# import seaborn as sns\nimport os\n\n# import EWS function\nimport sys\n# sys.path.append('../../early_warnings')\n# from ews_compute import ews_compute\n\n\n#---------------------\n# Directory for data output\n#–----------------------\n\n# Name of directory within data_export\ndir_name = 'hopf_ews_temp'\n\n# if not os.path.exists('data_export/'+dir_name):\n # os.makedirs('data_export/'+dir_name)\n\n\n#--------------------------------\n# Global parameters\n#–-----------------------------\n\n\n# Simulation parameters\ndt = 0.01\nt0 = 0\ntmax = 500\ntburn = 100 # burn-in period\nnumSims = 2\nseed = 10 # random number generation seed\n\n# EWS parameters\ndt2 = 1 # spacing between time-series for EWS computation\nrw = 0.4 # rolling window\nbw = 0.1 # bandwidth\nlags = [1,2,3] # autocorrelation lag times\news = ['var','ac','sd','cv','skew','kurt','smax','aic','cf'] # EWS to compute\nham_length = 40 # number of data points in Hamming window\nham_offset = 0.5 # proportion of Hamming window to offset by upon each iteration\npspec_roll_offset = 20 # offset for rolling window when doing spectrum metrics\n\n\n#----------------------------------\n# Simulate many (transient) realisations\n#----------------------------------\n\n# Model\n\ndef de_fun_x(x,y,u,w):\n return u*x-w*y-x*(x**2+y**2)\n\ndef de_fun_y(x,y,u,w):\n return w*x+u*y-y*(x**2+y**2)\n \n# Model parameters\nsigma_x = 0.05 # noise intensity\nsigma_y = 0.05\nw = 2 # intrinsic frequency at Hopf bifurcation\nbl = -1 # control parameter initial value\nbh = 0.2 # control parameter final value\nbcrit = 0 # bifurcation point (computed in Mathematica)\nx0 = 0 # intial condition (equilibrium value)\ny0 = 0\n\n\n\n# Initialise arrays to store single time-series data\nt = np.arange(t0,tmax,dt)\nx = np.zeros(len(t))\ny = np.zeros(len(t))\n\n# Set up 
bifurcation parameter b, that increases linearly in time from bl to bh\nb = pd.Series(np.linspace(bl,bh,len(t)),index=t)\n# Time at which bifurcation occurs\ntbif = b[b > bcrit].index[1]\n\n## Implement Euler Maryuyama for stocahstic simulation\n\n\n# Set seed\nnp.random.seed(seed)\n\n# Initialise a list to collect trajectories\nlist_traj_append = []\n\n# loop over simulations\nprint('\\nBegin simulations \\n')\nfor j in range(numSims):\n \n \n # Create brownian increments (s.d. sqrt(dt))\n dW_x_burn = np.random.normal(loc=0, scale=sigma_x*np.sqrt(dt), size = int(tburn/dt))\n dW_x = np.random.normal(loc=0, scale=sigma_x*np.sqrt(dt), size = len(t))\n \n dW_y_burn = np.random.normal(loc=0, scale=sigma_y*np.sqrt(dt), size = int(tburn/dt))\n dW_y = np.random.normal(loc=0, scale=sigma_y*np.sqrt(dt), size = len(t))\n \n # Run burn-in period on x0\n for i in range(int(tburn/dt)):\n x0 = x0 + de_fun_x(x0,y0,bl,w)*dt + dW_x_burn[i]\n y0 = y0 + de_fun_y(x0,y0,bl,w)*dt + dW_y_burn[i]\n \n # Initial condition post burn-in period\n x[0]=x0\n y[0]=y0\n \n # Run simulation\n for i in range(len(t)-1):\n x[i+1] = x[i] + de_fun_x(x[i],y[i],b.iloc[i],w)*dt + dW_x[i]\n y[i+1] = y[i] + de_fun_y(x[i],y[i],b.iloc[i],w)*dt + dW_y[i]\n \n # Store series data in a temporary DataFrame\n data = {'Realisation number': (j+1)*np.ones(len(t)),\n 'Time': t,\n 'x': x,\n 'y': y}\n df_temp = pd.DataFrame(data)\n # Append to list\n list_traj_append.append(df_temp)\n \n print('Simulation '+str(j+1)+' complete')\n\n# Concatenate DataFrame from each realisation\ndf_traj = pd.concat(list_traj_append)\ndf_traj.set_index(['Realisation number','Time'], inplace=True)\n\ndf_traj.to_csv(\"/home/jmenard/HHT_EWS/traj_data_hopf.csv\")" }, { "alpha_fraction": 0.8032786846160889, "alphanum_fraction": 0.8032786846160889, "avg_line_length": 29, "blob_id": "b05ce31be99706a3b71ad61d5bc331afc96a1386", "content_id": "39ff3e3aaa95914e530daf7ccd460a23594c2ee9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 61, "license_type": "no_license", "max_line_length": 49, "num_lines": 2, "path": "/README.md", "repo_name": "HerdOfBears/HHT_EWS", "src_encoding": "UTF-8", "text": "# HHT_EWS\nHilbert-Huang Transform for Early-Warning Signals \n" } ]
3
arqchicago/nn-housing
https://github.com/arqchicago/nn-housing
199cefa29c217b4bff40cfec5dfbc34f6328de50
2e7b4f672faa6a4741fcd4e230075bae73703fcf
2253ab5e66baa62ebf18a938753d8ca9e2efe57f
refs/heads/main
2023-04-05T04:39:21.477728
2021-04-17T02:47:38
2021-04-17T02:47:38
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.695067286491394, "alphanum_fraction": 0.713004469871521, "avg_line_length": 37.06097412109375, "blob_id": "1faab49aa0353aaaefdd449ff0fa7da9d1a81761", "content_id": "df4c0c63123a199113be65196bdd3d69185fe49c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3122, "license_type": "no_license", "max_line_length": 125, "num_lines": 82, "path": "/nn_housing.py", "repo_name": "arqchicago/nn-housing", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n@author: Ahmad Qadri\nSequential Neural Network with dropout hidden layer and softmax output layer on Housing Dataset\n\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport sklearn.model_selection as skms\nfrom time import time\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Activation, Dense, Dropout\nfrom tensorflow.python.keras.callbacks import TensorBoard\nfrom tensorflow.keras import utils\n\n\nseed = 5941\ntf.random.set_seed(seed)\n\n#---- features and target variable\nquant_features = ['bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot', 'grade', 'age', 'appliance_age', 'crime', 'renovated'] \ncat_features = ['backyard', 'view', 'condition'] \nfeatures = quant_features\n\ntarget_var_orig = 'price'\ntarget_var = 'high_priced'\nweight = 'weight'\nsqft = 'sqft_living'\n\n#---- uploading data\nhousing_df = pd.read_csv('data\\\\housing.csv')\nhousing_df[target_var] = 0\nhousing_df.loc[housing_df[target_var_orig]>350000, target_var] = 1\nhousing_df.loc[housing_df[target_var_orig]>550000, target_var] = 2\nrows, cols = housing_df.shape\nprint(f'> rows = {rows}, cols = {cols}')\n\n#---- train/test split\nX, y = housing_df[features], housing_df[target_var]\nX_train, X_test, y_train, y_test = skms.train_test_split(X, y, test_size=0.20, random_state = seed)\n\ny_train_dummy = utils.to_categorical(y_train)\ny_test_dummy = utils.to_categorical(y_test)\n\nX_train_rows, y_train_rows = X_train.shape[0], y_train.shape[0]\nX_test_rows, y_test_rows = X_test.shape[0], y_test.shape[0] \n\n#X_train_weights = housing_df[weight].loc[X_train.index.values]\n#X_test_weights = housing_df[weight].loc[X_test.index.values]\n\nX_train_hp_rows = housing_df.groupby(target_var).size().to_frame('size').reset_index().values.tolist()\nX_test_hp_rows = housing_df.groupby(target_var).size().to_frame('size').reset_index().values.tolist()\n\nprint(f'> features = {len(features)}')\nprint(f'> training set = {X_train_rows} ({round(X_train_rows*1.0/rows,3)})')\nprint(f'> testing set = {X_test_rows} ({round(X_test_rows*1.0/rows,3)})\\n')\nprint(f'> training set price dummy = {X_train_hp_rows}')\nprint(f'> testing set price dummy = {X_test_hp_rows}\\n')\n\n#---- creating the model\nmodel = keras.Sequential()\nmodel.add(keras.Input(shape=(9,)))\nmodel.add(Dense(18, activation='sigmoid', name='layer1'))\nmodel.add(Dense(36, activation='sigmoid', name='layer2'))\nmodel.add(Dropout(0.10, name='dropout'))\nmodel.add(Dense(18, activation='sigmoid', name='layer3'))\nmodel.add(Dense(3, activation='softmax', name='output_layer'))\n\ntensorboard = TensorBoard(log_dir=\"logs/{}\".format(time()))\n\nprint(model.summary())\nprint(f'input shape= {model.input_shape}')\nprint(f'output shape= {model.output_shape}')\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\nmodel.fit(X_train, y_train_dummy, epochs=500, validation_split=0.10, verbose=1, 
callbacks=[tensorboard])\n\ntest_set_loss, test_set_accuracy = model.evaluate(X_test, y_test_dummy)\nprint(f'test set loss = {round(test_set_loss, 4)} test set accuracy = {round(test_set_accuracy, 4)}')\n\n" } ]
1
berkutsoft/web
https://github.com/berkutsoft/web
387c5dd5aa877adc6b50f40171aad8c95b0aa1b7
37b478f86ce8ce079fbfb36fd05b5a6becc09aec
ffa687d0ea90286751bb9522bf6e76be2ccc1d05
refs/heads/master
2021-01-21T04:42:31.120493
2016-07-19T13:14:55
2016-07-19T13:14:55
53,557,365
0
1
null
2016-03-10T05:20:54
2016-03-23T08:05:28
2016-07-21T07:57:00
HTML
[ { "alpha_fraction": 0.6050847172737122, "alphanum_fraction": 0.6203389763832092, "avg_line_length": 25.81818199157715, "blob_id": "f07c3729cd2ff272662fb147970e3493a2c3e59a", "content_id": "6bdf93fc274c5d523be3bf775f3b727902ca0bd5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 596, "license_type": "no_license", "max_line_length": 66, "num_lines": 22, "path": "/ask/qa/views.py", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse, HttpRequest\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom models import *\n\nfrom django.views.decorators.http import require_GET\n@require_GET\n\ndef test(request, *args, **kwargs):\n return HttpResponse('OK')\n\ndef page(pg=1):\n pg = pg.GET.get('page')\n if pg is None: pg=''\n question = get_object_or_404(Question)\n return render_to_response('index.html', {\n 'page': {\n 'title':'TITLE',\n 'header':u'Хеадер!'+pg,\n 'question':question[0]\n }\n })\n" }, { "alpha_fraction": 0.6476190686225891, "alphanum_fraction": 0.723809540271759, "avg_line_length": 24.75, "blob_id": "6a14565c43a51e182a11576a980d9f407cb308e7", "content_id": "a2db47825b7998e2665e14c40f4a927d51ddf171", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 105, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/min.sh", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "sudo killall gunicorn\nsudo /etc/init.d/nginx restart\ncd ~/web/ask\ngunicorn -D -b 0.0.0.0:8000 ask.wsgi\n\n\n" }, { "alpha_fraction": 0.626867949962616, "alphanum_fraction": 0.6862626075744629, "avg_line_length": 36.991355895996094, "blob_id": "9ac5c7febe582e4acf7c2879883855d15663ba1e", "content_id": "3faca175dee2c95bd253f8a5e0c0a182196d50b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 13183, "license_type": "no_license", "max_line_length": 955, "num_lines": 347, "path": "/django.sql", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "-- MySQL dump 10.13 Distrib 5.5.44, for debian-linux-gnu (x86_64)\n--\n-- Host: localhost Database: django\n-- ------------------------------------------------------\n-- Server version\t5.5.44-0ubuntu0.14.04.1\n\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;\n/*!40101 SET NAMES utf8 */;\n/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n/*!40103 SET TIME_ZONE='+00:00' */;\n/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;\n/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n\n--\n-- Table structure for table `answer`\n--\n\nDROP TABLE IF EXISTS `answer`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `answer` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `question` int(11) NOT NULL,\n `text` longtext NOT NULL,\n `added_at` datetime NOT NULL,\n `author_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `answer_e969df21` (`author_id`),\n CONSTRAINT `author_id_refs_id_79ae0258` FOREIGN KEY (`author_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT 
CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `answer`\n--\n\nLOCK TABLES `answer` WRITE;\n/*!40000 ALTER TABLE `answer` DISABLE KEYS */;\n/*!40000 ALTER TABLE `answer` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_group`\n--\n\nDROP TABLE IF EXISTS `auth_group`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_group` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(80) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `name` (`name`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `auth_group`\n--\n\nLOCK TABLES `auth_group` WRITE;\n/*!40000 ALTER TABLE `auth_group` DISABLE KEYS */;\n/*!40000 ALTER TABLE `auth_group` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_group_permissions`\n--\n\nDROP TABLE IF EXISTS `auth_group_permissions`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_group_permissions` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `group_id` int(11) NOT NULL,\n `permission_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `group_id` (`group_id`,`permission_id`),\n KEY `auth_group_permissions_5f412f9a` (`group_id`),\n KEY `auth_group_permissions_83d7f98b` (`permission_id`),\n CONSTRAINT `group_id_refs_id_f4b32aac` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`),\n CONSTRAINT `permission_id_refs_id_6ba0f519` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `auth_group_permissions`\n--\n\nLOCK TABLES `auth_group_permissions` WRITE;\n/*!40000 ALTER TABLE `auth_group_permissions` DISABLE KEYS */;\n/*!40000 ALTER TABLE `auth_group_permissions` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_permission`\n--\n\nDROP TABLE IF EXISTS `auth_permission`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_permission` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(50) NOT NULL,\n `content_type_id` int(11) NOT NULL,\n `codename` varchar(100) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `content_type_id` (`content_type_id`,`codename`),\n KEY `auth_permission_37ef4eb4` (`content_type_id`),\n CONSTRAINT `content_type_id_refs_id_d043b34a` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`)\n) ENGINE=InnoDB AUTO_INCREMENT=22 DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `auth_permission`\n--\n\nLOCK TABLES `auth_permission` WRITE;\n/*!40000 ALTER TABLE `auth_permission` DISABLE KEYS */;\nINSERT INTO `auth_permission` VALUES (1,'Can add log entry',1,'add_logentry'),(2,'Can change log entry',1,'change_logentry'),(3,'Can delete log entry',1,'delete_logentry'),(4,'Can add permission',2,'add_permission'),(5,'Can change permission',2,'change_permission'),(6,'Can delete permission',2,'delete_permission'),(7,'Can add group',3,'add_group'),(8,'Can change group',3,'change_group'),(9,'Can delete group',3,'delete_group'),(10,'Can add user',4,'add_user'),(11,'Can change user',4,'change_user'),(12,'Can delete user',4,'delete_user'),(13,'Can add content type',5,'add_contenttype'),(14,'Can change 
content type',5,'change_contenttype'),(15,'Can delete content type',5,'delete_contenttype'),(16,'Can add question',6,'add_question'),(17,'Can change question',6,'change_question'),(18,'Can delete question',6,'delete_question'),(19,'Can add answer',7,'add_answer'),(20,'Can change answer',7,'change_answer'),(21,'Can delete answer',7,'delete_answer');\n/*!40000 ALTER TABLE `auth_permission` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_user`\n--\n\nDROP TABLE IF EXISTS `auth_user`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_user` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `password` varchar(128) NOT NULL,\n `last_login` datetime NOT NULL,\n `is_superuser` tinyint(1) NOT NULL,\n `username` varchar(30) NOT NULL,\n `first_name` varchar(30) NOT NULL,\n `last_name` varchar(30) NOT NULL,\n `email` varchar(75) NOT NULL,\n `is_staff` tinyint(1) NOT NULL,\n `is_active` tinyint(1) NOT NULL,\n `date_joined` datetime NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `username` (`username`)\n) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `auth_user`\n--\n\nLOCK TABLES `auth_user` WRITE;\n/*!40000 ALTER TABLE `auth_user` DISABLE KEYS */;\nINSERT INTO `auth_user` VALUES (1,'pbkdf2_sha256$12000$TL3SKv16MRcD$17Te/xUF8faQhY9U2f2aTfhtd4Z60gQdnLXORXi+49c=','2016-05-20 10:06:46',1,'box','','','[email protected]',1,1,'2016-05-20 10:06:46');\n/*!40000 ALTER TABLE `auth_user` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_user_groups`\n--\n\nDROP TABLE IF EXISTS `auth_user_groups`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_user_groups` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `group_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `user_id` (`user_id`,`group_id`),\n KEY `auth_user_groups_6340c63c` (`user_id`),\n KEY `auth_user_groups_5f412f9a` (`group_id`),\n CONSTRAINT `user_id_refs_id_40c41112` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),\n CONSTRAINT `group_id_refs_id_274b862c` FOREIGN KEY (`group_id`) REFERENCES `auth_group` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `auth_user_groups`\n--\n\nLOCK TABLES `auth_user_groups` WRITE;\n/*!40000 ALTER TABLE `auth_user_groups` DISABLE KEYS */;\n/*!40000 ALTER TABLE `auth_user_groups` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `auth_user_user_permissions`\n--\n\nDROP TABLE IF EXISTS `auth_user_user_permissions`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `auth_user_user_permissions` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `user_id` int(11) NOT NULL,\n `permission_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `user_id` (`user_id`,`permission_id`),\n KEY `auth_user_user_permissions_6340c63c` (`user_id`),\n KEY `auth_user_user_permissions_83d7f98b` (`permission_id`),\n CONSTRAINT `user_id_refs_id_4dc23c39` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`),\n CONSTRAINT `permission_id_refs_id_35d9ac25` FOREIGN KEY (`permission_id`) REFERENCES `auth_permission` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table 
`auth_user_user_permissions`\n--\n\nLOCK TABLES `auth_user_user_permissions` WRITE;\n/*!40000 ALTER TABLE `auth_user_user_permissions` DISABLE KEYS */;\n/*!40000 ALTER TABLE `auth_user_user_permissions` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `django_admin_log`\n--\n\nDROP TABLE IF EXISTS `django_admin_log`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `django_admin_log` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `action_time` datetime NOT NULL,\n `user_id` int(11) NOT NULL,\n `content_type_id` int(11) DEFAULT NULL,\n `object_id` longtext,\n `object_repr` varchar(200) NOT NULL,\n `action_flag` smallint(5) unsigned NOT NULL,\n `change_message` longtext NOT NULL,\n PRIMARY KEY (`id`),\n KEY `django_admin_log_6340c63c` (`user_id`),\n KEY `django_admin_log_37ef4eb4` (`content_type_id`),\n CONSTRAINT `content_type_id_refs_id_93d2d1f8` FOREIGN KEY (`content_type_id`) REFERENCES `django_content_type` (`id`),\n CONSTRAINT `user_id_refs_id_c0d12874` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `django_admin_log`\n--\n\nLOCK TABLES `django_admin_log` WRITE;\n/*!40000 ALTER TABLE `django_admin_log` DISABLE KEYS */;\n/*!40000 ALTER TABLE `django_admin_log` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `django_content_type`\n--\n\nDROP TABLE IF EXISTS `django_content_type`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `django_content_type` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `name` varchar(100) NOT NULL,\n `app_label` varchar(100) NOT NULL,\n `model` varchar(100) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `app_label` (`app_label`,`model`)\n) ENGINE=InnoDB AUTO_INCREMENT=8 DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `django_content_type`\n--\n\nLOCK TABLES `django_content_type` WRITE;\n/*!40000 ALTER TABLE `django_content_type` DISABLE KEYS */;\nINSERT INTO `django_content_type` VALUES (1,'log entry','admin','logentry'),(2,'permission','auth','permission'),(3,'group','auth','group'),(4,'user','auth','user'),(5,'content type','contenttypes','contenttype'),(6,'question','qa','question'),(7,'answer','qa','answer');\n/*!40000 ALTER TABLE `django_content_type` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `question`\n--\n\nDROP TABLE IF EXISTS `question`;\n/*!40101 SET @saved_cs_client = @@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `question` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `title` varchar(255) NOT NULL,\n `text` longtext NOT NULL,\n `added_at` datetime NOT NULL,\n `raiting` int(11) NOT NULL,\n `author_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n KEY `question_e969df21` (`author_id`),\n CONSTRAINT `author_id_refs_id_aba11983` FOREIGN KEY (`author_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `question`\n--\n\nLOCK TABLES `question` WRITE;\n/*!40000 ALTER TABLE `question` DISABLE KEYS */;\n/*!40000 ALTER TABLE `question` ENABLE KEYS */;\nUNLOCK TABLES;\n\n--\n-- Table structure for table `question_likes`\n--\n\nDROP TABLE IF EXISTS `question_likes`;\n/*!40101 SET @saved_cs_client = 
@@character_set_client */;\n/*!40101 SET character_set_client = utf8 */;\nCREATE TABLE `question_likes` (\n `id` int(11) NOT NULL AUTO_INCREMENT,\n `question_id` int(11) NOT NULL,\n `user_id` int(11) NOT NULL,\n PRIMARY KEY (`id`),\n UNIQUE KEY `question_id` (`question_id`,`user_id`),\n KEY `question_likes_25110688` (`question_id`),\n KEY `question_likes_6340c63c` (`user_id`),\n CONSTRAINT `question_id_refs_id_44570623` FOREIGN KEY (`question_id`) REFERENCES `question` (`id`),\n CONSTRAINT `user_id_refs_id_66d3c544` FOREIGN KEY (`user_id`) REFERENCES `auth_user` (`id`)\n) ENGINE=InnoDB DEFAULT CHARSET=latin1;\n/*!40101 SET character_set_client = @saved_cs_client */;\n\n--\n-- Dumping data for table `question_likes`\n--\n\nLOCK TABLES `question_likes` WRITE;\n/*!40000 ALTER TABLE `question_likes` DISABLE KEYS */;\n/*!40000 ALTER TABLE `question_likes` ENABLE KEYS */;\nUNLOCK TABLES;\n/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n\n-- Dump completed on 2016-05-20 10:13:47\n" }, { "alpha_fraction": 0.6837607026100159, "alphanum_fraction": 0.6837607026100159, "avg_line_length": 28.25, "blob_id": "673ed7b641cb65c3a99d775f88f1fd96844551cd", "content_id": "7492fe9b4012996fbfc9b44c2dc537263e1d67c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 117, "license_type": "no_license", "max_line_length": 42, "num_lines": 4, "path": "/git.sh", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "git add * -f\ngit config --global user.name \"berkutsoft\"\ngit config --global user.email \"[email protected]\"\n#git commit -a -m \"\n" }, { "alpha_fraction": 0.637499988079071, "alphanum_fraction": 0.6526785492897034, "avg_line_length": 25.046510696411133, "blob_id": "ebb882a21e51773d6deac992f27e0562006a5ec0", "content_id": "7d71353c27041822ab12693a49ae94c72d81e28d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1120, "license_type": "no_license", "max_line_length": 79, "num_lines": 43, "path": "/program.py", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, Unicode, BigInteger\n\nBase = declarative_base()\n\n\nclass Order(Base):\n\n __tablename__ = 'orders'\n\n id = Column(BigInteger, nullable=False, primary_key=True)\n name = Column(Unicode(255), nullable=False)\n # accepted = 1, hold = 0\n state = Column(Integer, nullable=False, index=True)\n\n def __init__(self, name, state):\n self.name = name\n self.state = state\n\n def __repr__(self):\n return \"Name: '%s'\" % (self.name)\n\n\ndb_engine = create_engine('mysql+mysqldb://root:pass@localhost/DB', echo=False)\nBase.metadata.create_all(db_engine)\nSession = sessionmaker(bind=db_engine)\nsession = Session()\n\n\ndef mark_random_orders_accepted(num):\n c = 0\n while c < num:\n q = session.query(Order).filter(Order.state == 0).yield_per(100)\n for order in q:\n order.state = 1\n c += 1\n session.commit()\n\nif __name__ == \"__main__\":\n 
mark_random_orders_accepted(20000)\n" }, { "alpha_fraction": 0.6858876943588257, "alphanum_fraction": 0.6904400587081909, "avg_line_length": 31.75, "blob_id": "a6365be3a0e09841df68ac8997c1886dc8d1190d", "content_id": "0f4d20bd1d9017e8f01f8df6d9fa882e4a26068f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 659, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/ask/qa/models.py", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Question (models.Model):\n class Meta:\n db_table = 'question'\n title = models.CharField(max_length=255)\n text = models.TextField()\n added_at = models.DateTimeField(auto_now_add=True)\n raiting = models.IntegerField() \n author = models.ForeignKey(User)\n likes = models.ManyToManyField(User, related_name='likes_set')\n\nclass Answer (models.Model):\n class Meta:\n db_table = 'answer'\n question = models.IntegerField()\n text = models.TextField()\n added_at = models.DateTimeField(auto_now_add=True)\n author = models.ForeignKey(User)\n \n" }, { "alpha_fraction": 0.6800000071525574, "alphanum_fraction": 0.7217391133308411, "avg_line_length": 40.07143020629883, "blob_id": "0356e842b425786bf2b07b0606859f50f38f4470", "content_id": "b3016a915b60195583dad9da9d0bf2fe51d05ee7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 575, "license_type": "no_license", "max_line_length": 67, "num_lines": 14, "path": "/init.sh", "repo_name": "berkutsoft/web", "src_encoding": "UTF-8", "text": "sudo rm /etc/nginx/sites-enabled/default\nsudo ln -s ~/web/etc/nginx.conf /etc/nginx/sites-enabled/test.conf\nsudo /etc/init.d/nginx restart\n#sudo ln -s ~/web/etc/hello.py /etc/gunicorn.d/hello.py\nsudo ln -s ~/web/etc/gunicorn.config /etc/gunicorn.d/ask\n#sudo ln -s ~/web/etc/django.py /etc/gunicorn.d/django.py\n#sudo /etc/init.d/gunicorn restart\n#gunicorn -b 0.0.0.0:8080 -D hello:app\n#gunicorn -b 0.0.0.0:8080 -D django:app\ncd ~/web/ask\ngunicorn -D -b 0.0.0.0:8000 ask.wsgi\nsudo /etc/init.d/mysql start\nmysql -uroot -e \"create database django\"\npython manage.py syncdb\n" } ]
7
roman-karpovich/drf-batch-requests
https://github.com/roman-karpovich/drf-batch-requests
c04b793e0763787b6fd6b561fbc344492956faac
eaf7e119ba0740a4a01d98ebf2f6b9665131eb50
7d9a7076b9078d13164924e4e13e49ddcbb64f33
refs/heads/master
2023-02-08T02:40:18.850889
2022-06-17T03:08:15
2022-06-17T03:08:15
95,201,494
12
8
MIT
2017-06-23T08:45:02
2022-06-17T02:55:20
2023-02-03T04:46:13
Python
[ { "alpha_fraction": 0.8019323945045471, "alphanum_fraction": 0.8019323945045471, "avg_line_length": 33.5, "blob_id": "887296a2e37e24c1ac828f385d6b01f008e7a6b0", "content_id": "7cfeaca0e6a1bd2ec16fcd6d094dc9bb4758d7ad", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 207, "license_type": "permissive", "max_line_length": 114, "num_lines": 6, "path": "/drf_batch_requests/settings.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from django.conf import settings\n\n# Consumer backend\nREQUESTS_CONSUMER_BACKEND = getattr(\n settings, \"DRF_BATCH_REQUESTS_CONSUMER_BACKEND\", 'drf_batch_requests.backends.sync.SyncRequestsConsumeBackend'\n)\n" }, { "alpha_fraction": 0.7833333611488342, "alphanum_fraction": 0.7833333611488342, "avg_line_length": 59, "blob_id": "13d949101d3f19f702fcb38cc281d6aa45d06d37", "content_id": "0f394df0ca907036bfd40468206e36cdedb91c69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "permissive", "max_line_length": 103, "num_lines": 3, "path": "/drf_batch_requests/backends/base.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "class RequestsConsumeBaseBackend(object):\n def consume_request(self, request, start_callback=None, success_callback=None, fail_callback=None):\n raise NotImplementedError\n" }, { "alpha_fraction": 0.4980921745300293, "alphanum_fraction": 0.5071910619735718, "avg_line_length": 33.06999969482422, "blob_id": "34315b6b9e9ca373457cf1cad9c1a4fcb239db02", "content_id": "b96d8e2c3f1b438c178c8fd2f8b9d0dce1360e46", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3407, "license_type": "permissive", "max_line_length": 110, "num_lines": 100, "path": "/tests/test_view.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "import json\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\n\nfrom rest_framework import status\n\nfrom tests.mixins import APITestCase\n\n\nclass BaseTestCase(APITestCase):\n def test_json_batch(self):\n batch = [\n {\n \"method\": \"GET\",\n \"relative_url\": \"/tests/test/\",\n \"name\": \"request1\"\n },\n {\n \"method\": \"GET\",\n \"relative_url\": \"/tests/test/?ids={result=request1:$.data.*.id}\"\n }\n ]\n\n responses = self.forced_auth_req('post', '/batch/', data={'batch': batch})\n self.assertEqual(responses.status_code, status.HTTP_200_OK, msg=responses.data)\n self.assertEqual(\"request1\", responses.data[0]['name'])\n self.assertEqual(\"OK\", responses.data[0]['code_text'])\n\n responses_data = [json.loads(r['body']) for r in responses.data]\n self.assertIn('ids', responses_data[1]['get'])\n self.assertEqual(\n responses_data[1]['get']['ids'],\n ','.join([str(o['id']) for o in responses_data[0]['data']])\n )\n\n def test_multipart_simple_request(self):\n batch = [\n {\n \"method\": \"GET\",\n \"relative_url\": \"/tests/test/\"\n }\n ]\n\n responses = self.forced_auth_req(\n 'post', '/batch/',\n data={'batch': json.dumps(batch)},\n request_format='multipart',\n )\n self.assertEqual(responses.status_code, status.HTTP_200_OK, msg=responses.data)\n\n responses_data = list(map(lambda r: json.loads(r['body']), responses.data))\n\n self.assertIn('data', responses_data[0])\n\n def test_multipart_files_upload(self):\n batch = [\n {\n \"method\": \"POST\",\n \"relative_url\": 
\"/tests/test-files/\",\n \"attached_files\": {\n \"file\": \"file1\",\n \"second_file\": 'file2'\n }\n }\n ]\n\n responses = self.forced_auth_req(\n 'post', '/batch/',\n data={\n 'batch': json.dumps(batch),\n 'file1': SimpleUploadedFile('hello_world.txt', u'hello world!'.encode('utf-8')),\n 'file2': SimpleUploadedFile('second file.txt', u'test!'.encode('utf-8')),\n },\n request_format='multipart',\n )\n self.assertEqual(responses.status_code, status.HTTP_200_OK, msg=responses.data)\n\n responses_data = list(map(lambda r: json.loads(r['body']), responses.data))\n self.assertIn('files', responses_data[0])\n self.assertListEqual(sorted(['file', 'second_file']), sorted(list(responses_data[0]['files'].keys())))\n self.assertListEqual(\n sorted(['hello_world.txt', 'second file.txt']),\n sorted([a['name'] for a in responses_data[0]['files'].values()])\n )\n\n def test_non_json(self):\n responses = self.forced_auth_req(\n 'post', '/batch/',\n data={\n 'batch': [\n {\n 'method': 'GET',\n 'relative_url': '/test-non-json/'\n }\n ]\n }\n )\n\n self.assertEqual(responses.status_code, status.HTTP_200_OK, msg=responses.data)\n self.assertEqual(responses.data[0]['body'], 'test non-json output')\n" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.6175438761711121, "avg_line_length": 14, "blob_id": "173cfc67feb4b6263ea6cb3a70a4f4a74396134d", "content_id": "06536e3380508ca90d7e9b7bdbdfff7533606416", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 285, "license_type": "permissive", "max_line_length": 70, "num_lines": 19, "path": "/.flake8", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "[flake8]\nmax-line-length = 120\nignore =\n ; PyFlakes errors\n ; F405 name may be undefined, or defined from star imports: module\n ; DUO130 insecure use of \"hashlib\" module\n DUO130\n F405\n W503\n S105\n S107\n S303\n P103\n\n\n\nexclude =\n */migrations\n ./.tox\n" }, { "alpha_fraction": 0.6845070719718933, "alphanum_fraction": 0.6901408433914185, "avg_line_length": 31.272727966308594, "blob_id": "380c00b86dfae4cd02fb399a43a5bec58485d2ae", "content_id": "b3d1c08408c6598331376d46e4dca8284d5c78e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "permissive", "max_line_length": 88, "num_lines": 11, "path": "/drf_batch_settings/urls.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "try:\n from django.conf.urls import include, url\nexcept ImportError:\n # django 2.0\n from django.urls import include\n from django.urls import re_path as url\n\nurlpatterns = [\n url(r'^batch/', include('drf_batch_requests.urls', namespace='drf_batch')),\n url(r'^example/', include('drf_batch_example.urls', namespace='drf_batch_example')),\n]\n" }, { "alpha_fraction": 0.5662291049957275, "alphanum_fraction": 0.5680190920829773, "avg_line_length": 25.603174209594727, "blob_id": "97afd00a9ff160bb4adf09e6b9f48c2041b23a71", "content_id": "be950cf1153a27d035e93de0fe2dbf6db2c1e3f9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1676, "license_type": "permissive", "max_line_length": 104, "num_lines": 63, "path": "/drf_batch_requests/response.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "import json\nfrom json import JSONDecodeError\nfrom typing import Iterable\n\nfrom rest_framework.status import 
is_success\n\n\nclass ResponseHeader:\n def __init__(self, name: str, value: str):\n self.name = name\n self.value = value\n\n def to_dict(self):\n return {\n 'key': self.name,\n 'value': self.value,\n }\n\n\nclass BatchResponse:\n name: str\n code: int\n code_text: str\n headers: Iterable[ResponseHeader]\n body: str\n _data: dict\n _return_body: bool = True\n\n def __init__(self, name: str, status_code: int, body: str, headers: Iterable[ResponseHeader] = None,\n omit_response_on_success: bool = False, status_text: str = None):\n self.name = name\n self.status_code = status_code\n self.status_text = status_text\n self.body = body\n self.headers = headers or []\n self.omit_response_on_success = omit_response_on_success\n\n if is_success(self.status_code):\n try:\n self._data = json.loads(self.body)\n except JSONDecodeError:\n self._data = {}\n\n if is_success(self.status_code) and self.omit_response_on_success:\n self._return_body = False\n\n def to_dict(self) -> dict:\n return {\n 'name': self.name,\n 'code': self.status_code,\n 'code_text': self.status_text,\n 'headers': [h.to_dict() for h in self.headers],\n 'body': self.body,\n }\n\n @property\n def data(self):\n return self._data\n\n\nclass DummyBatchResponse(BatchResponse):\n def __init__(self, name: str):\n super().__init__(name, 418, '')\n" }, { "alpha_fraction": 0.5727272629737854, "alphanum_fraction": 0.574999988079071, "avg_line_length": 23.44444465637207, "blob_id": "5e9d40e14e431f22d7a2bfc137c2eeb08d06b3c3", "content_id": "1b910715fe1364f2cf657c7815aa808b8efc37e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 880, "license_type": "permissive", "max_line_length": 78, "num_lines": 36, "path": "/drf_batch_requests/utils.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "import random\nimport string\n\n\ndef get_attribute(instance, attrs):\n for attr in attrs:\n if instance is None:\n return None\n\n if attr == '*':\n # todo: maybe there should be some kind of filtering?\n continue\n\n if isinstance(instance, list):\n instance = list(map(lambda i: i[attr], instance))\n else:\n instance = instance[attr]\n return instance\n\n\ndef generate_random_id(size=10, chars=string.ascii_uppercase + string.digits):\n return ''.join(random.choice(chars) for _ in range(size))\n\n\ndef generate_node_callback(node, status):\n def callback():\n if status == 'start':\n node.start()\n elif status == 'success':\n node.complete()\n elif status == 'fail':\n node.fail()\n else:\n raise NotImplementedError\n\n return callback\n" }, { "alpha_fraction": 0.4308300316333771, "alphanum_fraction": 0.6284584999084473, "avg_line_length": 25.172412872314453, "blob_id": "9c73fd677c026f44c74a98032b46a804d3a53c0d", "content_id": "d0ce3be06de56e450ca2154e31e25de7f1a53f29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 759, "license_type": "permissive", "max_line_length": 58, "num_lines": 29, "path": "/tox.ini", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "[tox]\nenvlist =\n py38-linter,\n {py36,py37,py38}-django{111,22},\n {py36,py37,py38,py39}-django{30,31},\n {py37,py38,py39,py310}-django{32},\n {py38,py39,py310}-django{40},\n[testenv]\nsetenv=\n PYTHONPATH=\ndeps =\n django111: django>=1.11,<2\n django22: django>=2.2,<3\n django30: django>=3.0,<3.1\n django31: django>=3.1,<3.2\n django32: django>=3.2,<3.3\n django40: django>=4.0,<4.1\n\n 
django{111,22,30,31,32,40}: djangorestframework\n\n django{111,22,30,31,32,40}: mock\n django{111,22,30,31,32,40}: coverage\n linter: isort>=5.1\n linter: flake8\ncommands =\n linter: flake8 .\n linter: isort . --check-only --rr\n django{111,22,30,31,32,40}: coverage erase\n django{111,22,30,31,32,40}: coverage run ./runtests.py\n" }, { "alpha_fraction": 0.49917080998420715, "alphanum_fraction": 0.5074626803398132, "avg_line_length": 26.409090042114258, "blob_id": "b5cc0e81c92a9ab06ac79445e669bce9bc44c1b6", "content_id": "7b46fd74b1a9e04d20812af5e19bcc69b40713be", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "permissive", "max_line_length": 96, "num_lines": 22, "path": "/drf_batch_example/views.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from django.http import JsonResponse\n\nfrom rest_framework.views import APIView\n\n\nclass TestView(APIView):\n def get(self, request, *args, **kwargs):\n return self.finalize_response(request, JsonResponse({\n 'id': 1,\n 'data': [\n {'id': '1'},\n {'id': '2'},\n {'id': '3'},\n {'id': '4'},\n ],\n 'empty_argument': None\n }))\n\n def post(self, request, *args, **kwargs):\n return self.finalize_response(request, JsonResponse({'data': request.data.get('data')}))\n\n# todo: add CBV and FBV\n" }, { "alpha_fraction": 0.5416666865348816, "alphanum_fraction": 0.5506486892700195, "avg_line_length": 35.7706413269043, "blob_id": "aa13ed17688278f7d7b1ebee03fb0d27931c67c6", "content_id": "8c9e642a55f6e0ad30254cf259dcf7fb34a5c880", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 4008, "license_type": "permissive", "max_line_length": 121, "num_lines": 109, "path": "/readme.rst", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "DRF batch requests\n==================\n\n|PyPI version| |Travis CI| |Coverage Status| |Code Health| |Python\nVersions| |Implementation|\n\nQuick start\n-----------\n\nexamples:\n\n::\n\n curl -X POST \\\n http://127.0.0.1:8000/batch/ \\\n -H 'cache-control: no-cache' \\\n -H 'content-type: application/json' \\\n -d '{\"batch\": [\n {\n \"method\": \"get\",\n \"relative_url\": \"/test/\",\n \"name\": \"yolo\"\n },\n {\n \"method\": \"post\",\n \"relative_url\": \"/test/?id={result=yolo:$.id}&ids={result=yolo:$.data.*.id}\",\n \"body\": {\"data\": {\"id\": \"{result=yolo:$.id}\", \"ids\": \"{result=yolo:$.data.*.id}\"}, \"test\": \"yolo\"}\n },\n {\n \"method\": \"post\",\n \"relative_url\": \"/test/\",\n \"body\": \"{\\\"data\\\": 42}\",\n \"omit_response_on_success\": true\n },\n {\n \"method\": \"options\",\n \"relative_url\": \"/test/\"\n }\n ]\n }'\n\nusing file uploading\n\n::\n\n curl -X POST \\\n http://127.0.0.1:8000/batch/ \\\n -H 'cache-control: no-cache' \\\n -H 'content-type: multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW' \\\n -F 'batch=[\n {\n \"method\": \"get\",\n \"relative_url\": \"/test/\",\n \"name\": \"yolo\"\n },\n {\n \"method\": \"post\",\n \"relative_url\": \"/test/?id={result=yolo:$.id}&ids={result=yolo:$.data.*.id}\",\n \"body\": {\"data\": \"{result=yolo:$.data.*.id}\", \"test\": \"yolo\"},\n \"attached_files\":{\"file\": \"a.jpg\"}\n },\n {\n \"method\": \"post\",\n \"relative_url\": \"/test/\",\n \"body\": \"{\\\"data\\\": 42}\",\n \"omit_response_on_success\": true,\n \"attached_files\":[\"a.jpg\", \"b.png\"]\n },\n {\n \"method\": \"options\",\n 
\"relative_url\": \"/test/\"\n }\n ]' \\\n -F [email protected] \\\n -F a.jpg=@check_133.pdf\n\nFuture features:\n\n- add support for requests pipelining. use responses as arguments to\n next requests (done)\n- build graph based on requests dependencies & run simultaneously\n independent.\n- [STRIKEOUT:switchable atomic support. true - all fails if something\n wrong. else - fail only dependent (can be very hard to support on\n front-end side, but for now seems as good feature)] run all requests\n in single transaction. (done)\n- [STRIKEOUT:use native django. we don't use complicated things that\n require drf for work. all can be done with \"naked\" django.] (since we\n validate requests with drf serializers, it's better to leave as it\n is).\n- support files uploading (done)\n\nDependencies:\n\n- Django starting from 2.2\n- Django rest framework\n\n.. |PyPI version| image:: https://badge.fury.io/py/drf-batch-requests.svg\n :target: https://badge.fury.io/py/drf-batch-requests\n.. |Travis CI| image:: https://travis-ci.org/roman-karpovich/drf-batch-requests.svg?branch=master\n :target: https://travis-ci.org/roman-karpovich/drf-batch-requests\n.. |Coverage Status| image:: https://coveralls.io/repos/github/roman-karpovich/drf-batch-requests/badge.svg?branch=master\n :target: https://coveralls.io/github/roman-karpovich/drf-batch-requests?branch=master\n.. |Code Health| image:: https://landscape.io/github/roman-karpovich/drf-batch-requests/master/landscape.svg?style=flat\n :target: https://landscape.io/github/roman-karpovich/drf-batch-requests/master\n.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/drf-batch-requests.svg?style=flat-square\n :target: https://pypi.python.org/pypi/drf-batch-requests\n.. |Implementation| image:: https://img.shields.io/pypi/implementation/drf-batch-requests.svg?style=flat-square\n :target: https://pypi.python.org/pypi/drf-batch-requests\n" }, { "alpha_fraction": 0.5832483768463135, "alphanum_fraction": 0.5854333639144897, "avg_line_length": 35.71123123168945, "blob_id": "f960a873024e5769df7b7136614dbb496baa566b", "content_id": "1b1e64ba850922b7bcd6e63d053f8516ff12aa74", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6865, "license_type": "permissive", "max_line_length": 114, "num_lines": 187, "path": "/drf_batch_requests/request.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "import json\nimport re\nfrom io import BytesIO\nfrom urllib.parse import urlsplit\n\nfrom django.http import HttpRequest\nfrom django.http.request import QueryDict\n\ntry:\n from django.utils.encoding import force_text\nexcept ImportError:\n from django.utils.encoding import force_str as force_text\n\nfrom rest_framework.exceptions import ValidationError\n\nfrom drf_batch_requests.exceptions import RequestAttributeError\nfrom drf_batch_requests.serializers import BatchRequestSerializer\nfrom drf_batch_requests.utils import get_attribute\n\n\nclass BatchRequest(HttpRequest):\n\n def __init__(self, request, request_data):\n super(BatchRequest, self).__init__()\n self.name = request_data.get('name')\n self.omit_response_on_success = request_data.get('omit_response_on_success', False)\n\n self._stream = BytesIO(request_data['_body'].encode('utf-8'))\n self._read_started = False\n\n self.method = request_data['method']\n\n split_url = urlsplit(request_data['relative_url'])\n self.path_info = self.path = split_url.path\n\n self.GET = QueryDict(split_url.query)\n 
self._set_headers(request, request_data.get('headers', {}))\n self.COOKIES = request.COOKIES\n\n # Standard WSGI supported headers\n # (are not prefixed with HTTP_)\n _wsgi_headers = [\"content_length\", \"content_type\", \"query_string\",\n \"remote_addr\", \"remote_host\", \"remote_user\",\n \"request_method\", \"server_name\", \"server_port\"]\n\n def _set_headers(self, request, headers):\n \"\"\"\n Inherit headers from batch request by default.\n Override with values given in subrequest.\n \"\"\"\n self.META = request.META if request is not None else {}\n if headers is not None:\n self.META.update(self._transform_headers(headers))\n\n def _transform_headers(self, headers):\n \"\"\"\n For every header:\n - replace - to _\n - prepend http_ if necessary\n - convert to uppercase\n \"\"\"\n result = {}\n for header, value in headers.items():\n header = header.replace(\"-\", \"_\")\n header = \"http_{header}\".format(header=header) \\\n if header.lower() not in self._wsgi_headers \\\n else header\n result.update({header.upper(): value})\n return result\n\n\nclass BatchRequestsFactory(object):\n response_variable_regex = re.compile(r'({result=(?P<name>[\\w\\d_]+):\\$\\.(?P<value>[\\w\\d_.*]+)})')\n\n def __init__(self, request):\n self.request = request\n self.request_serializer = BatchRequestSerializer(data=request.data)\n self.request_serializer.is_valid(raise_exception=True)\n self.update_soft_dependencies()\n\n self.named_responses = {}\n\n def update_soft_dependencies(self):\n for request_data in self.request_serializer.validated_data['batch']:\n parents = request_data.get('depends_on', [])\n\n for part in request_data.values():\n params = re.findall(\n self.response_variable_regex, force_text(part)\n )\n\n parents.extend(map(lambda param: param[1], params or []))\n\n request_data['depends_on'] = set(parents)\n\n def _prepare_formdata_body(self, data, files=None):\n if not data and not files:\n return ''\n\n match = re.search(r'boundary=(?P<boundary>.+)', self.request.content_type)\n assert match\n boundary = match.groupdict()['boundary']\n body = ''\n for key, value in data.items():\n value = value if isinstance(value, str) else json.dumps(value)\n body += '--{}\\r\\nContent-Disposition: form-data; name=\"{}\"\\r\\n\\r\\n{}\\r\\n'.format(boundary, key, value)\n\n if files:\n for key, attachment in files.items():\n attachment.seek(0)\n attachment_body_part = '--{0}\\r\\nContent-Disposition: form-data; name=\"{1}\"; filename=\"{2}\"\\r\\n' \\\n 'Content-Type: {3}\\r\\n' \\\n 'Content-Transfer-Encoding: binary\\r\\n\\r\\n{4}\\r\\n'\n body += attachment_body_part.format(\n boundary, key, attachment.name, attachment.content_type, attachment.read()\n )\n\n body += '--{}--\\r\\n'.format(boundary)\n return body\n\n def _prepare_urlencoded_body(self, data):\n raise NotImplementedError\n\n def _prepare_json_body(self, data):\n return json.dumps(data)\n\n def _process_attr(self, attr):\n params = re.findall(\n self.response_variable_regex, attr\n )\n if not params:\n return attr\n\n for url_param in params:\n if url_param[1] not in self.named_responses:\n raise ValidationError('Named request {} is missing'.format(url_param[1]))\n\n result = get_attribute(\n self.named_responses[url_param[1]].data,\n url_param[2].split('.')\n )\n\n if result is None:\n raise RequestAttributeError('Empty result for {}'.format(url_param[2]))\n\n if isinstance(result, list):\n result = ','.join(map(str, result))\n\n if attr == url_param[0]:\n attr = result\n else:\n attr = attr.replace(url_param[0], str(result))\n\n 
return attr\n\n def updated_obj(self, obj):\n \"\"\"\n For now, i'll update only dict values. Later it can be used for keys/single values/etc\n :param obj: dict\n :return: dict\n \"\"\"\n if isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = self.updated_obj(value)\n elif isinstance(obj, str):\n return self._process_attr(obj)\n\n return obj\n\n def get_requests_data(self):\n return self.request_serializer.validated_data['batch']\n\n def generate_request(self, request_data):\n request_data['data'] = self.updated_obj(request_data['data'])\n request_data['relative_url'] = self._process_attr(request_data['relative_url'])\n\n if self.request.content_type.startswith('multipart/form-data'):\n request_data['_body'] = self._prepare_formdata_body(request_data['data'],\n files=request_data.get('files', {}))\n elif self.request.content_type.startswith('application/x-www-form-urlencoded'):\n request_data['_body'] = self._prepare_urlencoded_body(request_data['data'])\n elif self.request.content_type.startswith('application/json'):\n request_data['_body'] = self._prepare_json_body(request_data['data'])\n else:\n raise ValidationError('Unsupported content type')\n\n return BatchRequest(self.request, request_data)\n" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 18.384614944458008, "blob_id": "d5ec5e5e29df3b84ebd46cef90436fc32d19eb03", "content_id": "15e1791b49b5d19285c0cacb2e7277f3fa099eb6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 252, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/drf_batch_requests/urls.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "try:\n from django.conf.urls import url\nexcept ImportError:\n # django 2.0\n from django.urls import re_path as url\n\nfrom drf_batch_requests import views\n\napp_name = 'drt_batch_requests'\n\nurlpatterns = [\n url('^', views.BatchView.as_view())\n]\n" }, { "alpha_fraction": 0.7678571343421936, "alphanum_fraction": 0.7678571343421936, "avg_line_length": 27, "blob_id": "79c26b2eb38dad7bd64734940557c899c72becad", "content_id": "9c8dd32b26120f21d1bf5aec37a587774ebc12e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 168, "license_type": "permissive", "max_line_length": 64, "num_lines": 6, "path": "/drf_batch_requests/exceptions.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "class BatchRequestException(Exception):\n pass\n\n\nclass RequestAttributeError(BatchRequestException):\n \"\"\" Empty request attribute. Unable to perform request. 
\"\"\"\n" }, { "alpha_fraction": 0.665217399597168, "alphanum_fraction": 0.6695652008056641, "avg_line_length": 29.66666603088379, "blob_id": "9287baabb0ad52470886d5648106a13ea67ef46b", "content_id": "dda94cb6dc8164e496f340a855f3c4aebf99fdc2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "permissive", "max_line_length": 77, "num_lines": 15, "path": "/tests/urls.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "try:\n from django.conf.urls import include, url\nexcept ImportError:\n # django 2.0\n from django.urls import re_path as url, include\n\nfrom tests import views\n\nurlpatterns = [\n url('batch/', include('drf_batch_requests.urls', namespace='drf_batch')),\n url('test/', views.TestAPIView.as_view()),\n url('test_fbv/', views.test_fbv),\n url('test-files/', views.TestFilesAPIView.as_view()),\n url('test-non-json/', views.SimpleView.as_view()),\n]\n" }, { "alpha_fraction": 0.6186291575431824, "alphanum_fraction": 0.6192150115966797, "avg_line_length": 36.93333435058594, "blob_id": "51b8c0c1150ea05bb2840c560082b4e3d25c1851", "content_id": "a04f7bbfccc5d78a3d0fdebc08e8d0de0b0fe758", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3414, "license_type": "permissive", "max_line_length": 111, "num_lines": 90, "path": "/drf_batch_requests/views.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from importlib import import_module\n\nfrom django.db import transaction\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom drf_batch_requests import settings as app_settings\nfrom drf_batch_requests.exceptions import RequestAttributeError\nfrom drf_batch_requests.graph import RequestGraph\nfrom drf_batch_requests.request import BatchRequestsFactory\nfrom drf_batch_requests.response import BatchResponse, DummyBatchResponse, ResponseHeader\nfrom drf_batch_requests.utils import generate_node_callback\n\ntry:\n from json import JSONDecodeError\nexcept ImportError:\n JSONDecodeError = ValueError\n\n\nclass BatchView(APIView):\n permission_classes = []\n\n def get_requests_consumer_class(self):\n mod, inst = app_settings.REQUESTS_CONSUMER_BACKEND.rsplit('.', 1)\n mod = import_module(mod)\n return getattr(mod, inst)\n\n def get_requests_consumer(self):\n return self.get_requests_consumer_class()()\n\n @transaction.atomic\n def post(self, request, *args, **kwargs):\n requests = {}\n responses = {}\n\n requests_factory = BatchRequestsFactory(request)\n requests_data = requests_factory.get_requests_data()\n ordered_names = list(map(lambda r: r['name'], requests_data))\n requests_graph = RequestGraph(requests_data)\n\n backend = self.get_requests_consumer()\n\n while True:\n available_nodes = list(requests_graph.get_current_available_nodes())\n\n for node in available_nodes:\n try:\n current_request = requests_factory.generate_request(node.request)\n except RequestAttributeError:\n # todo: set fail reason\n node.fail()\n\n start_callback = generate_node_callback(node, 'start')\n success_callback = generate_node_callback(node, 'success')\n fail_callback = generate_node_callback(node, 'fail')\n if backend.consume_request(current_request, start_callback=start_callback,\n success_callback=success_callback, fail_callback=fail_callback):\n requests[node.name] = current_request\n\n is_completed = requests_graph.is_completed()\n\n for 
current_request, response in backend.responses.items():\n if current_request.name in responses:\n continue\n\n header_items = response.items()\n\n result = BatchResponse(\n current_request.name,\n response.status_code,\n response.content.decode('utf-8'),\n headers=[\n ResponseHeader(key, value)\n for key, value in header_items\n ],\n omit_response_on_success=current_request.omit_response_on_success,\n status_text=response.reason_phrase\n )\n\n if current_request.name:\n requests_factory.named_responses[current_request.name] = result\n\n responses[current_request.name] = result.to_dict()\n\n if is_completed:\n break\n\n ordered_responses = [responses.get(name, DummyBatchResponse(name).to_dict()) for name in ordered_names]\n return self.finalize_response(request, Response(ordered_responses))\n" }, { "alpha_fraction": 0.6134045720100403, "alphanum_fraction": 0.6134045720100403, "avg_line_length": 31.944000244140625, "blob_id": "e1f698fae88facd55972b5b0242b7a95d3633119", "content_id": "0b565ffa52e44e50de46dbf1306d84e339db33a2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4118, "license_type": "permissive", "max_line_length": 120, "num_lines": 125, "path": "/drf_batch_requests/serializers.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "import json\n\nfrom django.core.files import File\n\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom drf_batch_requests.utils import generate_random_id\n\n\nclass SingleRequestSerializer(serializers.Serializer):\n method = serializers.CharField()\n relative_url = serializers.CharField()\n\n headers = serializers.JSONField(required=False)\n name = serializers.CharField(required=False)\n depends_on = serializers.JSONField(required=False)\n body = serializers.JSONField(required=False, default={})\n # attached files formats: [\"a.jpg\", \"b.png\"] - will be attached as it is, {\"file\": \"a.jpg\"} - attach as specific key\n attached_files = serializers.JSONField(required=False)\n data = serializers.SerializerMethodField()\n files = serializers.SerializerMethodField()\n\n def validate_headers(self, value):\n if isinstance(value, dict):\n return value\n\n def validate_relative_url(self, value):\n if not value.startswith('/'):\n raise self.fail('Url should start with /')\n\n return value\n\n def validate_body(self, value):\n if isinstance(value, dict):\n return value\n\n try:\n json.loads(value)\n except (TypeError, ValueError):\n self.fail('invalid')\n\n return value\n\n def validate(self, attrs):\n if 'name' not in attrs:\n attrs['name'] = generate_random_id()\n\n if 'depends_on' in attrs:\n value = attrs['depends_on']\n if not isinstance(value, (str, list)):\n raise ValidationError({'depends_on': 'Incorrect value provided'})\n\n if isinstance(value, str):\n attrs['depends_on'] = [value]\n\n return attrs\n\n def get_data(self, data):\n body = data['body']\n if isinstance(body, dict):\n return body\n\n return json.loads(body)\n\n def get_files(self, attrs):\n if 'attached_files' not in attrs:\n return []\n\n attached_files = attrs['attached_files']\n if isinstance(attached_files, dict):\n return {\n key: self.context['parent'].get_files()[attrs['attached_files'][key]] for key in attrs['attached_files']\n }\n elif isinstance(attached_files, list):\n return {\n key: self.context['parent'].get_files()[key] for key in attrs['attached_files']\n }\n else:\n raise ValidationError('Incorrect format.')\n\n\nclass 
BatchRequestSerializer(serializers.Serializer):\n batch = serializers.JSONField()\n files = serializers.SerializerMethodField()\n\n def get_files(self, attrs=None):\n return {fn: f for fn, f in self.initial_data.items() if isinstance(f, File)}\n\n def validate_batch(self, value):\n if not isinstance(value, list):\n raise ValidationError('List of requests should be provided to do batch')\n\n r_serializers = list(map(lambda d: SingleRequestSerializer(data=d, context={'parent': self}), value))\n\n errors = []\n for serializer in r_serializers:\n serializer.is_valid()\n errors.append(serializer.errors)\n if any(errors):\n raise ValidationError(errors)\n\n return [s.data for s in r_serializers]\n\n def validate(self, attrs):\n attrs = super(BatchRequestSerializer, self).validate(attrs)\n\n files_in_use = []\n for batch in attrs['batch']:\n if 'attached_files' not in batch:\n continue\n\n attached_files = batch['attached_files']\n if isinstance(attached_files, dict):\n files_in_use.extend(attached_files.values())\n elif isinstance(attached_files, list):\n files_in_use.extend(attached_files)\n else:\n raise ValidationError({'attached_files': 'Invalid format.'})\n\n missing_files = set(files_in_use) - set(self.get_files().keys())\n if missing_files:\n raise ValidationError('Some of files are not provided: {}'.format(', '.join(missing_files)))\n\n return attrs\n" }, { "alpha_fraction": 0.6455854773521423, "alphanum_fraction": 0.6455854773521423, "avg_line_length": 31.59183692932129, "blob_id": "ce5e28932ebcda42533dae7402c9661c98d6a9a9", "content_id": "a2a5bd4a42689139184b10b0fa95a967c937f61b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1597, "license_type": "permissive", "max_line_length": 98, "num_lines": 49, "path": "/tests/mixins.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "try:\n from django.urls import resolve\nexcept ImportError:\n from django.core.urlresolvers import resolve\n\nfrom rest_framework.test import APIRequestFactory\nfrom rest_framework.test import APITestCase as OriginalAPITestCase\nfrom rest_framework.test import force_authenticate\n\n\nclass APITestCase(OriginalAPITestCase):\n \"\"\"\n Base test case for testing APIs\n \"\"\"\n maxDiff = None\n\n def __init__(self, *args, **kwargs):\n super(APITestCase, self).__init__(*args, **kwargs)\n self.user = None\n\n def forced_auth_req(self, method, url, user=None, data=None, request_format='json', **kwargs):\n \"\"\"\n Function that allows api methods to be called with forced authentication\n\n :param method: the HTTP method 'get'/'post'\n :type method: str\n :param url: the relative url to the base domain\n :type url: st\n :param user: optional user if not authenticated as the current user\n :type user: django.contrib.auth.models.User\n :param data: any data that should be passed to the API view\n :type data: dict\n \"\"\"\n factory = APIRequestFactory()\n view_info = resolve(url)\n\n data = data or {}\n view = view_info.func\n req_to_call = getattr(factory, method)\n request = req_to_call(url, data, format=request_format, **kwargs)\n\n user = user or self.user\n force_authenticate(request, user=user)\n\n response = view(request, *view_info.args, **view_info.kwargs)\n if hasattr(response, 'render'):\n response.render()\n\n return response\n" }, { "alpha_fraction": 0.6884615421295166, "alphanum_fraction": 0.6961538195610046, "avg_line_length": 19, "blob_id": "9814e503f420ac68c34a5acd19d44aa59391e9e9", "content_id": 
"556f5c6dba9f4d955894d4c0fab57ff9a3476fa3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "permissive", "max_line_length": 42, "num_lines": 13, "path": "/drf_batch_example/urls.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "try:\n from django.conf.urls import url\nexcept ImportError:\n # django 2.0\n from django.urls import re_path as url\n\nfrom drf_batch_example import views\n\napp_name = 'drf_batch_requests_tests'\n\nurlpatterns = [\n url('test', views.TestView.as_view()),\n]\n" }, { "alpha_fraction": 0.5530410408973694, "alphanum_fraction": 0.5586987137794495, "avg_line_length": 26.19230842590332, "blob_id": "733b43954d4496532f93ea55cbbd98edf771955d", "content_id": "3d422a515d2cf8c919096d830c5be3de81fac0bb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 707, "license_type": "permissive", "max_line_length": 52, "num_lines": 26, "path": "/tests/test_request.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from django.test import SimpleTestCase\n\nfrom rest_framework.test import APIRequestFactory\n\nfrom drf_batch_requests.request import BatchRequest\n\n\nclass RequestTest(SimpleTestCase):\n\n def test_subrequest_headers(self):\n # Arrange\n data = {\n 'method': 'get',\n 'relative_url': '/test/',\n 'headers': {\n 'header-1': 'whatever',\n 'Content-Length': 56,\n },\n '_body': ''\n }\n request = APIRequestFactory().post('/test')\n # Act\n result = BatchRequest(request, data)\n # Assert\n self.assertIn('HTTP_HEADER_1', result.META)\n self.assertIn('CONTENT_LENGTH', result.META)\n" }, { "alpha_fraction": 0.6997690796852112, "alphanum_fraction": 0.6997690796852112, "avg_line_length": 29.928571701049805, "blob_id": "babc7b36b8bf71d44bdbb402979eb7621c285d90", "content_id": "481495da36fc5ea08ea180d86875f58358421b23", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 866, "license_type": "permissive", "max_line_length": 103, "num_lines": 28, "path": "/drf_batch_requests/backends/sync.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from django.core.handlers.base import BaseHandler\n\nfrom rest_framework.status import is_success\n\nfrom drf_batch_requests.backends.base import RequestsConsumeBaseBackend\n\n\nclass SyncRequestsConsumeBackend(RequestsConsumeBaseBackend):\n def __init__(self):\n self.responses = {}\n\n # todo: from this point i think we can consume requests pack\n def consume_request(self, request, start_callback=None, success_callback=None, fail_callback=None):\n start_callback() if start_callback else None\n\n handler = BaseHandler()\n handler.load_middleware()\n\n response = handler.get_response(request)\n\n if is_success(response.status_code):\n success_callback() if success_callback else None\n else:\n fail_callback() if fail_callback else None\n\n self.responses[request] = response\n\n return True\n" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.571628212928772, "avg_line_length": 29.446807861328125, "blob_id": "4ba9650c2085a8fbf407147e356e5cefb316daf0", "content_id": "15471663100c4acaafe22f937433d253736da53d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 92, "num_lines": 47, "path": "/tests/views.py", 
"repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "from django.http import JsonResponse\nfrom django.http.response import HttpResponse as DjangoResponse\nfrom django.views.generic import View\n\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\n\nclass TestAPIView(APIView):\n def get(self, request, *args, **kwargs):\n return self.finalize_response(request, Response({\n 'data': [\n {'id': 1, 'some_data': 'foo'},\n {'id': 2, 'some_data': 'bar'},\n {'id': 3, 'some_data': 'baz'},\n ],\n 'page': 1,\n 'get': request.query_params\n }))\n\n def post(self, request, *args, **kwargs):\n return self.finalize_response(request, Response({'data': request.data.get('data')}))\n\n\ndef test_fbv(request):\n if request.method == 'POST':\n return JsonResponse(request.POST)\n else:\n return JsonResponse({'field1': 'field1_value', 'field2': 'field2_value'})\n\n\nclass TestFilesAPIView(APIView):\n def post(self, request, *args, **kwargs):\n return self.finalize_response(request, Response({\n 'files': {\n key: {\n 'name': attachment.name,\n 'size': attachment.size\n }\n for key, attachment in request.FILES.items()\n }\n }))\n\n\nclass SimpleView(View):\n def get(self, request):\n return DjangoResponse('test non-json output')\n" }, { "alpha_fraction": 0.6003033518791199, "alphanum_fraction": 0.6021994948387146, "avg_line_length": 32.37974548339844, "blob_id": "f74efb43ac47fa3e667421a7e36d7029a56e5999", "content_id": "904fdeba88f022217516bf4ae68903489787e4a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2637, "license_type": "permissive", "max_line_length": 111, "num_lines": 79, "path": "/drf_batch_requests/graph.py", "repo_name": "roman-karpovich/drf-batch-requests", "src_encoding": "UTF-8", "text": "class RequestGraphNode(object):\n STATUS_FAILED = -1\n STATUS_FAILED_PARENT = -2\n STATUS_NOT_STARTED = 0\n STATUS_IN_PROGRESS = 1\n STATUS_COMPLETED = 2\n\n def __init__(self, request=None):\n self.request = request\n self.name = self.request.get('name')\n self.parents = set()\n self.children_set = set()\n self.status = self.STATUS_NOT_STARTED\n\n def start(self):\n self.status = RequestGraphNode.STATUS_IN_PROGRESS\n\n def complete(self):\n self.status = RequestGraphNode.STATUS_COMPLETED\n\n def fail(self, own_fail=True):\n self.status = RequestGraphNode.STATUS_FAILED if own_fail else RequestGraphNode.STATUS_FAILED_PARENT\n for child_node in filter(lambda n: n.status == RequestGraphNode.STATUS_NOT_STARTED, self.children_set):\n child_node.fail(own_fail=False)\n\n @property\n def can_be_performed(self):\n if not self.status == RequestGraphNode.STATUS_NOT_STARTED:\n return False\n\n return all(map(lambda parent: parent.status == RequestGraphNode.STATUS_COMPLETED, self.parents))\n\n def __str__(self):\n return self.name or super(RequestGraphNode, self).__str__()\n\n\nclass RequestGraph(object):\n def __init__(self, requests):\n self.nodes = [RequestGraphNode(request) for request in requests]\n self._named_requests = {\n node.request['name']: node\n for node in filter(lambda n: n.request.get('name'), self.nodes)\n }\n\n for node in self.nodes:\n parents = node.request.get('depends_on', [])\n\n for parent_name in parents:\n parent = self._named_requests.get(parent_name)\n if not parent:\n raise Exception('Wrong parent {} in node.'.format(parent_name))\n\n node.parents.add(parent)\n parent.children_set.add(node)\n\n def get_node_order(self, node):\n return self.nodes.index(node)\n\n 
def get_not_failed_nodes(self):\n return filter(\n lambda node: node.status not in [\n RequestGraphNode.STATUS_FAILED,\n RequestGraphNode.STATUS_FAILED_PARENT\n ],\n self.nodes\n )\n\n def get_current_available_nodes(self):\n return filter(lambda node: node.can_be_performed, self.get_not_failed_nodes())\n\n def is_completed(self):\n return all(map(\n lambda node: node.status in [\n RequestGraphNode.STATUS_FAILED,\n RequestGraphNode.STATUS_FAILED_PARENT,\n RequestGraphNode.STATUS_COMPLETED\n ],\n self.nodes\n ))\n" } ]
22
patilo/python-disenioclases
https://github.com/patilo/python-disenioclases
c29a3c691d60ef943a6af1679138f8cf714f1943
e8302612e2d4ee3414258746798a153aafa3a646
e04e016a18eb5d0e656d1e200e762fd6a5ae6610
refs/heads/main
2023-07-28T04:34:23.193389
2021-09-13T21:22:30
2021-09-13T21:22:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5971731543540955, "alphanum_fraction": 0.6183745861053467, "avg_line_length": 26, "blob_id": "87b9feab90aeb1c35252a403fa4358362fde65cd", "content_id": "fe365491ee9b3e601ffc231c2394d93f336151bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 95, "num_lines": 21, "path": "/Productos.py", "repo_name": "patilo/python-disenioclases", "src_encoding": "UTF-8", "text": "class Productos:\n\n contadorProductos = 0\n\n def __init__(self,nombre,precio):\n Productos.contadorProductos +=1\n self._idProducto = Productos.contadorProductos\n self._nombre = nombre\n self._precio = precio\n\n @property\n def precio(self):\n return self._precio\n\n def __str__(self):\n return f' el producto es: {self._idProducto}, {self._nombre}, y precio: {self._precio}'\n\nif __name__ == '__main__':\n producto1 = Productos('camisa', 500)\n producto2 = Productos('pantalon', 800)\n print(producto1, producto2)" }, { "alpha_fraction": 0.7972972989082336, "alphanum_fraction": 0.8040540814399719, "avg_line_length": 73, "blob_id": "dc20b5c80b416fc46e876dfba4dbf2a2c65e91c9", "content_id": "17192bc5df4c03dd55df4c49b21bc642b39cf445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 149, "license_type": "no_license", "max_line_length": 124, "num_lines": 2, "path": "/README.md", "repo_name": "patilo/python-disenioclases", "src_encoding": "UTF-8", "text": "# python-disenioclases\ncreación de 2 tipos de clases en la cual una llama a la otra usando la propiedad import-from para hacer llamado a otra clase\n" } ]
2
Ph12112/Ph12112
https://github.com/Ph12112/Ph12112
3c814e0e4d2661fe11eaaedd1837f336cfcced5c
34d48ea9bf1cff997026b35d5550f24836aa9cbd
805f7e243944222ec67d06bd83bcd7f059c34ae5
refs/heads/main
2023-08-28T10:24:26.087017
2021-10-24T11:40:58
2021-10-24T11:40:58
403,223,611
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5224999785423279, "alphanum_fraction": 0.5493749976158142, "avg_line_length": 23.90625, "blob_id": "5bd09c8bad4530800c2041c6f43191d9c7dc7d65", "content_id": "70ff0ca59c648b859c7d32aa2daf118c297754ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1806, "license_type": "no_license", "max_line_length": 91, "num_lines": 64, "path": "/031902512/checkSensitive/src/commonUtil.py", "repo_name": "Ph12112/Ph12112", "src_encoding": "UTF-8", "text": "#encoding:utf-8\nfrom __future__ import division\n\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n# sys.path.append('/Users/luyao/anaconda/lib/python2.7/site-packages')\nimport re\n\n\n\ndef getReg(txt_convert):\n \"\"\"\n 对文本进行正则过滤,检测广告、链接等信息\n :param txt: 文本\n :return: 正则过滤后的文本\n \"\"\"\n url_patten = r\"([^\\s]+(\\.com))|([a-zA-z]+://[^\\s]*)\" #http://xxx, www.xxxx.com, [email protected]\n html_patten=r\"<(\\S*?)[^>]*>.*?|<.*? />\"\n qq_phone_patten=r\"[1-9][0-9]{4,}\" #第一位1-9之间的数字,第二位0-9之间的数字,大于1000号\n wx_patten=r\"[a-zA-Z][a-zA-Z0-9_-]{5,19}$\"\n\n if re.findall(url_patten,txt_convert).__len__()>0:\n result = u\"疑似[网页链接或邮箱]\"\n elif re.findall(html_patten,txt_convert).__len__()>0:\n result = u\"疑似[html脚本]\"\n elif re.findall(qq_phone_patten,txt_convert).__len__()>0:\n result = u\"疑似[QQ号或手机号]\"\n elif re.findall(wx_patten,txt_convert).__len__()>0:\n result = u\"疑似[微信号]\"\n else:\n result = u\"非广告文本\"\n return result\n\n\n\ndef calcScore(sensitiveWordStr):\n b=sensitiveWordStr\n b1=b.split(\",\")\n b2=[i.split(\":\")[0] for i in b1 if len(i) > 1]\n\n score = 0\n for x in b2:\n if x in (u\"毒品\", u\"色情\", u\"赌博\"):\n score += 5\n elif x in (u\"政治\", u\"反动\", u\"暴恐\"):\n score += 4\n elif x == u\"社会\":\n score += 3\n else: #其他\n score += 2\n return score\n\n\n\ndef calcGrade(score,sensitive_list_word_length,txt_length):\n if score>15 and sensitive_list_word_length/txt_length>=0.33:\n suggest=u\"删除\"\n elif score==0:\n suggest=u\"通过\"\n else:\n suggest=u\"掩码\"\n return suggest\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5896735787391663, "alphanum_fraction": 0.6034504175186157, "avg_line_length": 28.734317779541016, "blob_id": "6c47e46356a4ab559363af7d52209d9905ad6424", "content_id": "6339bc1792441159e0a42b74a840a28c4e71949e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9819, "license_type": "no_license", "max_line_length": 258, "num_lines": 271, "path": "/031902512/checkSensitive/src/sensitiveApi.py", "repo_name": "Ph12112/Ph12112", "src_encoding": "UTF-8", "text": "#encoding:utf-8\nfrom __future__ import division\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport commonUtil, qbTransform\nimport json\nfrom flask import Flask,request,Response\nfrom flask_restful import Api\nfrom gevent.pywsgi import WSGIServer\nimport logging\n\n#pip install flask\n#pip install flask_restful\n#pip install gevent\n\nlogger = logging.getLogger(\"cccode\")\nformatter = logging.Formatter('%(asctime)s %(levelname)-8s: %(message)s')\nfile_handler = logging.FileHandler(\"sensitive.log\")\nfile_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\nlogger.setLevel(logging.INFO)\n\n\n\napp = Flask(__name__)\napi = Api(app)\n\n\nMinMatchType = 1 #最小匹配规则,如:敏感词库[\"中国\", \"中国人\"],语句:\"我是中国人\",匹配结果:我是[中国]人\nMaxMatchType = 2 #最大匹配规则,如:敏感词库[\"中国\", \"中国人\"],语句:\"我是中国人\",匹配结果:我是[中国人]\n\n\ndef initSensitiveWordMap(sensitiveWordSet):\n \"\"\"\n 初始化敏感词库,构建DFA算法模型\n :param sensitiveWordSet: 
敏感词库,包括词语和其对应的敏感类别\n :return: DFA模型\n \"\"\"\n sensitiveWordMap=dict()\n for category,key in sensitiveWordSet:\n if type(key)=='unicode' and type(category)=='unicode' : #转换为unicode\n pass\n else:\n key=unicode(key)\n category=unicode(category)\n\n nowMap = sensitiveWordMap\n for i in range(len(key)):\n keyChar =key[i] # 转换成char型\n wordMap = nowMap.get(keyChar) #库中获取关键字\n #如果存在该key,直接赋值,用于下一个循环获取\n if wordMap != None:\n nowMap =wordMap\n else:\n #不存在则构建一个map,同时将isEnd设置为0,因为不是最后一个\n newWorMap = dict()\n #不是最后一个\n newWorMap[\"isEnd\"]=\"0\"\n nowMap[keyChar]=newWorMap\n nowMap = newWorMap\n #最后一个\n if i ==len(key)-1:\n nowMap[\"isEnd\"]=\"1\"\n nowMap[\"category\"]=category\n return sensitiveWordMap\n\n\ndef checkSensitiveWord(txt,beginIndex,matchType=MinMatchType):\n \"\"\"\n 检查文字中是否包含敏感字符\n :param txt:待检测的文本\n :param beginIndex: 调用getSensitiveWord时输入的参数,获取词语的上边界index\n :param matchType:匹配规则 1:最小匹配规则,2:最大匹配规则\n :return:如果存在,则返回敏感词字符的长度,不存在返回0\n \"\"\"\n flag=False\n category=\"\"\n matchFlag=0 #敏感词的长度\n nowMap=sensitiveWordMap\n tmpFlag=0 #包括特殊字符的敏感词的长度\n\n # print \"len(txt)\",len(txt) #9\n for i in range(beginIndex,len(txt)):\n word = txt[i]\n\n #检测是否是特殊字符,eg\"法&&轮&功...\"\n if word in stopWordSet and len(nowMap)<100:\n #len(nowMap)<100 保证已经找到这个词的开头之后出现的特殊字符\n #eg\"情节中,法&&轮&功...\"这个逗号不会被检测\n tmpFlag += 1\n continue\n\n\n #获取指定key\n nowMap=nowMap.get(word)\n if nowMap !=None: #存在,则判断是否为最后一个\n #找到相应key,匹配标识+1\n matchFlag+=1\n tmpFlag+=1\n #如果为最后一个匹配规则,结束循环,返回匹配标识数\n if nowMap.get(\"isEnd\")==\"1\":\n #结束标志位为true\n flag=True\n category=nowMap.get(\"category\")\n #最小规则,直接返回,最大规则还需继续查找\n if matchType==MinMatchType:\n break\n else: #不存在,直接返回\n break\n\n\n if matchFlag<2 or not flag: #长度必须大于等于1,为词\n tmpFlag=0\n return tmpFlag,category\n\n\ndef contains(txt,matchType=MinMatchType):\n \"\"\"\n 判断文字是否包含敏感字符\n :param txt: 待检测的文本\n :param matchType: 匹配规则 1:最小匹配规则,2:最大匹配规则\n :return: 若包含返回true,否则返回false\n \"\"\"\n flag=False\n for i in range(len(txt)):\n matchFlag=checkSensitiveWord(txt,i,matchType)[0]\n if matchFlag>0:\n flag=True\n return flag\n\n\ndef getSensitiveWord(txt,matchType=MinMatchType):\n \"\"\"\n 获取文字中的敏感词\n :param txt: 待检测的文本\n :param matchType: 匹配规则 1:最小匹配规则,2:最大匹配规则\n :return:文字中的敏感词\n \"\"\"\n sensitiveWordList=list()\n for i in range(len(txt)): #0---11\n length = checkSensitiveWord(txt, i, matchType)[0]\n category=checkSensitiveWord(txt, i, matchType)[1]\n if length>0:\n word=txt[i:i + length]\n sensitiveWordList.append(category+\":\"+word)\n i = i + length - 1\n return sensitiveWordList\n\n\ndef replaceSensitiveWord(txt, replaceChar, matchType=MinMatchType):\n \"\"\"\n 替换敏感字字符\n :param txt: 待检测的文本\n :param replaceChar:用于替换的字符,匹配的敏感词以字符逐个替换,如\"你是大王八\",敏感词\"王八\",替换字符*,替换结果\"你是大**\"\n :param matchType: 匹配规则 1:最小匹配规则,2:最大匹配规则\n :return:替换敏感字字符后的文本\n \"\"\"\n tupleSet = getSensitiveWord(txt, matchType)\n wordSet=[i.split(\":\")[1] for i in tupleSet]\n resultTxt=\"\"\n if len(wordSet)>0: #如果检测出了敏感词,则返回替换后的文本\n for word in wordSet:\n replaceString=len(word)*replaceChar\n txt = txt.replace(word, replaceString)\n resultTxt=txt\n else: #没有检测出敏感词,则返回原文本\n resultTxt = txt\n return resultTxt\n\n\n\n# 特殊字符集\nf = open(\"./data/stopword.txt\")\nstopWordSet = [i.split('\\n')[0] for i in f.readlines()]\n\n# 敏感词集\nf1 = open(\"./data/dict.txt\")\nlst = f1.readlines()\nsensitiveWordSet = [i.split(\"\\n\")[0].split(\"\\t\") for i in lst]\n# print u\"词汇总数:\", len(sensitiveWordSet)\nsensitiveWordMap = initSensitiveWordMap(sensitiveWordSet)\n# print u\"DFA结构词汇数:\", len(sensitiveWordMap)\n\n\n\[email 
protected]('/sensitive',methods=['POST'])\ndef get():\n # time_start = time.time()\n try:\n txt=request.json['txt']\n txt_length=len(txt)\n txt_convert= qbTransform.strQ2B(txt) #全角转半角\n reg_result= commonUtil.getReg(txt_convert) #正则过滤\n\n if reg_result==u\"非广告文本\":\n #是否包含敏感词\n contain = contains(txt=txt_convert,matchType=MaxMatchType) #默认 MinMatchType\n #敏感词和其类别\n sensitive_list = getSensitiveWord(txt=txt_convert, matchType=MaxMatchType) #默认 MinMatchType\n sensitive_list_str=u','.join(sensitive_list) #字符串形式的敏感词和其类别\n sensitive_list_word=[i.split(\":\")[1] for i in sensitive_list] #敏感词\n #敏感词的字数\n sensitive_list_word_length=0\n for word in sensitive_list_word :\n if len(word)<=1:\n continue\n sensitive_list_word_length+=len(word)\n\n #待检测语句的敏感度得分\n score= commonUtil.calcScore(sensitive_list_str)\n #待检测语句的敏感级别\n grade= commonUtil.calcGrade(score, sensitive_list_word_length, txt_length)\n #替换敏感词后的文本\n txt_replace=replaceSensitiveWord(txt=txt_convert,replaceChar='*',matchType=MaxMatchType) #默认MinMatchTYpe\n\n result_json={\n u\"txt\":txt,\n u\"txtLength\":txt_length,\n u\"regularResult\":reg_result,\n u\"ifContainSensitiveWord\":contain,\n u\"sensitiveWordCount\":len(sensitive_list),\n u\"sensitiveWordList\":\"[\"+sensitive_list_str+u\"]\",\n u\"score\":score,\n u\"grade\":grade,\n u\"txtReplace\":txt_replace\n }\n\n else:\n result_json={\n u\"txt\":txt,\n u\"txtLength\":txt_length,\n u\"regularResult\":reg_result,\n u\"grade\":u\"删除\"\n }\n\n\n result_log=json.dumps(result_json, encoding='utf-8', ensure_ascii=False)\n logger.info(result_log)\n\n # time_end = time.time()\n # print u\"运行时间:\", (time_end - time_start) * 1000, u\"ms\"\n\n except Exception,e:\n result_log={}\n logger.info(\"please check the input query! {} will be given by default---\"+str(e))\n\n r = Response(result_log, mimetype='application/json')\n r.headers['Content-Type'] = \"application/json; charset=utf-8\"\n\n return r\n\n\n\n\nif __name__ == '__main__':\n # app.run(host='127.0.0.1', port=4000, debug=True)\n WSGIServer(('localhost', 4000), app).serve_forever()\n\n\n\"\"\"\ncurl -H \"Content-type: application/json; charset=utf-8\" -X POST http://127.0.0.1:4000/sensitive -d '{\"txt\":\"访问 www.taobao.com\"}'\n\ncurl -H \"Content-type: application/json; charset=utf-8\" -X POST http://127.0.0.1:4000/sensitive -d '{\"txt\":\"小姐姐真漂亮,像个大王八,大王八\"}'\n\ncurl -H \"Content-type: application/json; charset=utf-8\" -X POST http://127.0.0.1:4000/sensitive -d '{\"txt\":\"国家主席习近平在中国青岛主持上海合作组织成员国元首理事会第十八次会议。王八蛋 荧幕中的情节。然后就是fuck dog 跟随着egg 主人公怒哀乐情节中。法.轮#功 难过就躺在某一个人的怀里,尽情的阐述心扉或者手机卡复制器,一个贱人一杯红酒一部电影在夜深人静的晚上,关上电话静静的发呆着。\"}'\n\n\"\"\"" }, { "alpha_fraction": 0.567585289478302, "alphanum_fraction": 0.614829421043396, "avg_line_length": 22.75, "blob_id": "4ebc90f0d66efa412dbb29eccf74ac996a449d7c", "content_id": "e9f95918035bd990ee033602a4aae7b2fbbf4469", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1948, "license_type": "no_license", "max_line_length": 80, "num_lines": 64, "path": "/031902512/checkSensitive/src/qbTransform.py", "repo_name": "Ph12112/Ph12112", "src_encoding": "UTF-8", "text": "#encoding:utf-8\nfrom __future__ import division\n\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n# sys.path.append('/Users/luyao/anaconda/lib/python2.7/site-packages')\n\n\ndef strQ2B(ustring):\n \"\"\"全角转半角\"\"\"\n rstring = \"\"\n if type(ustring)=='unicode':\n pass\n else:\n ustring=ustring.decode('utf-8')\n for uchar in ustring:\n inside_code = ord(uchar)\n if inside_code == 12288: 
#全角空格直接转换\n inside_code = 32\n elif (inside_code >= 65281 and inside_code <= 65374): # 全角字符(除空格)根据关系转化\n inside_code -= 65248\n\n rstring += unichr(inside_code)\n return rstring\n\n\ndef strB2Q(ustring):\n \"\"\"半角转全角\"\"\"\n rstring = \"\"\n if type(ustring)=='unicode':\n pass\n else:\n ustring=ustring.decode('utf-8')\n for uchar in ustring:\n inside_code = ord(uchar)\n if inside_code == 32: # 半角空格直接转化\n inside_code = 12288\n elif inside_code >= 32 and inside_code <= 126: # 半角字符(除空格)根据关系转化\n inside_code += 65248\n\n rstring += unichr(inside_code)\n return rstring\n\n\"\"\"\nchr()函数用一个范围在range(256)内的(就是0~255)整数作参数,返回一个对应的字符。\nunichr()跟它一样,只不过返回的是Unicode字符。\nord()函数是chr()函数(对于8位的ASCII字符串)或unichr()函数(对于Unicode对象)的配对函数,\n它以一个字符(长度为1的字符串)作为参数,返回对应的ASCII数值,或者Unicode数值。\n\"\"\"\n\n\n# b = strQ2B(u\"mn123abc博客 园\") #全角转半角\n# print b #mn123abc博客园\n#\n# c = strB2Q(u\"mn123abc 博客 园\") #半角转全角\n# print c #mn123abc博客园\n#\n# e = strQ2B(u\"I have a pen\")\n# print e #I have a pen\n#\n# d = strB2Q(u\"I have a pen\")\n# print d #I have a pen\n\n\n\n\n" } ]
3
fanghuiz/ufc-stats-crawler
https://github.com/fanghuiz/ufc-stats-crawler
e96b97998f66cff8e30a160389d0e7198a6d39d3
f13b6581aec890d46d9986471fd3adda83e8c801
889a16fd20cc538423f06f071219179f590fe343
refs/heads/master
2022-10-01T18:27:14.966762
2020-10-20T06:01:58
2020-10-20T06:01:58
236,715,345
20
17
MIT
2020-01-28T11:03:31
2022-07-04T20:49:34
2022-07-29T22:37:52
Julia
[ { "alpha_fraction": 0.5578486323356628, "alphanum_fraction": 0.5884928107261658, "avg_line_length": 29.169811248779297, "blob_id": "7bb1553c08d1d5210c9b34766fb6cbde78f44a44", "content_id": "c73fca76975ab2760ac19f99b0b177cc2be99a57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1599, "license_type": "permissive", "max_line_length": 75, "num_lines": 53, "path": "/ufcStats/utils.py", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "import datetime\nimport re\nfrom dateparser import parse\n\n\ndef get_element_atk(stat, element):\n if stat is None:\n return None\n f1_att = stat[0].split('of')[1].strip()\n f1_land = stat[0].split('of')[0].strip()\n f2_att = stat[1].split('of')[1].strip()\n f2_land = stat[1].split('of')[0].strip()\n if element == 'attempt':\n element = list([int(f1_att), int(f2_att)])\n if element == 'landed':\n element = list([int(f1_land), int(f2_land)])\n return element\n\n\ndef get_element_dmg(stat, element):\n if stat is None:\n return None\n f1_att = stat[0].split('of')[1].strip()\n f1_land = stat[0].split('of')[0].strip()\n f2_att = stat[1].split('of')[1].strip()\n f2_land = stat[1].split('of')[0].strip()\n if element == 'absorbed':\n # Absorbed - # landed by opponent\n f1_abs = int(f2_land)\n f2_abs = int(f1_land)\n element = list([f1_abs, f2_abs])\n if element == 'defended':\n # Defended - # attempts - # landed by opponent\n f1_def = int(f2_att) - int(f2_land)\n f2_def = int(f1_att) - int(f1_land)\n element = list([f1_def, f2_def])\n return element\n\n\ndef IS_Active(last_fight_date):\n \"\"\"\n Returns True if last fight date is less than 365 days from \n the date data lst fetched\n \"\"\"\n if last_fight_date is None:\n return True\n last_fight_date_delta = parse('today') - parse(last_fight_date)\n return last_fight_date_delta < datetime.timedelta(days=365)\n\n\ndef print_time(time):\n time = parse(time).replace(microsecond=0).isoformat().replace(':', '-')\n return time\n" }, { "alpha_fraction": 0.5472095012664795, "alphanum_fraction": 0.5573394298553467, "avg_line_length": 38.24250030517578, "blob_id": "4d6c78e07b827e9133f0eb3013e9c032010b393b", "content_id": "2e9617f691b2a21ae98ddd6e6926a26131f57fec", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15696, "license_type": "permissive", "max_line_length": 84, "num_lines": 400, "path": "/ufcStats/spiders/spider.py", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "import scrapy\nfrom scrapy.loader import ItemLoader\nfrom ufcStats.items import *\nfrom ufcStats.utils import *\n\n\nclass FightsSpider(scrapy.Spider):\n name = 'ufcFights'\n start_urls = ['http://ufcstats.com/statistics/events/completed?page=all']\n\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'ufcStats.pipelines.FightSummaryPipeline': 400,\n 'ufcStats.pipelines.FightStatsPipeline': 410\n }\n }\n\n def parse(self, response):\n \"\"\"\n Parse the event listing page, follow link to individual events page\n \"\"\"\n\n events_url = response.css(\n 'tbody .b-statistics__table-row ::attr(href)')\n\n for event in events_url:\n yield response.follow(event, callback=self.parse_event_link)\n\n def parse_event_link(self, response):\n \"\"\"\n Parse the event page, follow link to each individual fight page\n \"\"\"\n\n event_info = response.css('.b-list__box-list-item')\n date = event_info[0].css('::text').getall()[-1]\n location = event_info[1].css('::text').getall()[-1]\n 
fights_url = response.css(\n '.b-fight-details__table-row ::attr(data-link)')\n\n for fight in fights_url:\n yield response.follow(fight,\n callback=self.parse_fight_info,\n cb_kwargs=dict(date=date, location=location))\n\n def parse_fight_info(self, response, date, location):\n \"\"\"\n Parse fight info - fight level summary, and fighter stats\n \"\"\"\n\n ##### Fight summary ######\n fight_id = response.url.split('/')[-1]\n # date and location carry over from events page\n date = date.strip()\n location = location.strip()\n\n status = response.css(\n '.b-fight-details__person-status ::text').getall()\n\n # Fighter names\n names = response.css(\n '.b-fight-details__person-name :not(p)::text').getall()\n try:\n fighter_1 = names[0].strip()\n fighter_2 = names[1].strip()\n except:\n fighter_1 = None\n fighter_2 = None\n\n # IDs - Handle errors due to missing fighter link\n ids = response.css('.b-fight-details__person-name')\n fighter_1_id = ids[0].css('::attr(href)').get()\n fighter_2_id = ids[1].css('::attr(href)').get()\n\n if fighter_1_id is not None:\n fighter_1_id = fighter_1_id.split('/')[-1]\n if fighter_2_id is not None:\n fighter_2_id = fighter_2_id.split('/')[-1]\n\n # Winner name\n if status[0].strip() == 'W':\n winner = fighter_1\n elif status[1].strip() == 'W':\n winner = fighter_2\n elif status[0].strip() == 'D':\n winner = 'Draw'\n else:\n winner = 'NC'\n\n weight_class = response.css(\n '.b-fight-details__fight-title ::text').getall()\n\n if len(weight_class) > 1:\n weight_class = weight_class[-1].strip()\n if len(weight_class) == 1:\n weight_class = weight_class[0].strip()\n\n decision_method = response.css(\n \"i.b-fight-details__text-item_first [style='font-style: normal'] ::text\"\n ).get()\n\n fight_details = response.css('.b-fight-details__text-item')\n\n time_format = fight_details[2].css('::text').getall()[-1]\n fight_duration_lastrnd = fight_details[0].css('::text').getall()[-1]\n fight_duration_lastrnd_time = fight_details[1].css(\n '::text').getall()[-1]\n\n l = ItemLoader(item=FightsItem(), response=response)\n l.add_value('fight_id', fight_id)\n l.add_value('date', date)\n l.add_value('location', location)\n l.add_value('fighter_1', fighter_1)\n l.add_value('fighter_1_id', fighter_1_id)\n l.add_value('fighter_2', fighter_2)\n l.add_value('fighter_2_id', fighter_2_id)\n l.add_value('winner', winner)\n l.add_value('weight_class', weight_class)\n l.add_value('decision_method', decision_method.strip())\n l.add_value('time_format', time_format.strip())\n l.add_value('fight_duration_lastrnd', fight_duration_lastrnd.strip())\n l.add_value('fight_duration_lastrnd_time',\n fight_duration_lastrnd_time.strip())\n\n ##### Fighter Stats ######\n fighter_status = [i.strip() for i in status]\n fighter_id = list([fighter_1_id, fighter_2_id])\n fighter_name = list([fighter_1, fighter_2])\n\n stats = response.css('table:not(.js-fight-table)')\n\n # Fight stats - handle missing values\n if len(stats) == 2:\n stats_total = stats[0].css(\n '.b-fight-details__table-body .b-fight-details__table-col')\n stats_str = stats[1].css(\n '.b-fight-details__table-body .b-fight-details__table-col')\n\n ## Totals\n kd = stats_total[1].css('p ::text').getall()\n kd = [int(i.strip()) for i in kd]\n\n sig_str = stats_total[2].css('p ::text').getall()\n total_str = stats_total[4].css('p ::text').getall()\n td = stats_total[5].css('p ::text').getall()\n\n n_sub = stats_total[7].css('p ::text').getall()\n n_sub = [int(i.strip()) for i in n_sub]\n\n n_pass = stats_total[8].css('p ::text').getall()\n 
n_pass = [int(i.strip()) for i in n_pass]\n\n n_rev = stats_total[9].css('p ::text').getall()\n n_rev = [int(i.strip()) for i in n_rev]\n\n ## Significant strikes\n head = stats_str[3].css('p ::text').getall()\n body = stats_str[4].css('p ::text').getall()\n leg = stats_str[5].css('p ::text').getall()\n distance = stats_str[6].css('p ::text').getall()\n clinch = stats_str[7].css('p ::text').getall()\n ground = stats_str[8].css('p ::text').getall()\n else:\n kd = None\n sig_str = None\n total_str = None\n td = None\n n_sub = None\n n_pass = None\n n_rev = None\n head = None\n body = None\n leg = None\n distance = None\n clinch = None\n ground = None\n\n #l.add_value('fight_id', fight_id)\n l.add_value('fighter_id', fighter_id)\n l.add_value('fighter_name', fighter_name)\n l.add_value('fighter_status', fighter_status)\n l.add_value('kd', kd)\n l.add_value('sig_str_land', get_element_atk(sig_str, 'landed'))\n l.add_value('sig_str_att', get_element_atk(sig_str, 'attempt'))\n l.add_value('total_str_land', get_element_atk(total_str, 'landed'))\n l.add_value('total_str_att', get_element_atk(total_str, 'attempt'))\n l.add_value('td_land', get_element_atk(td, 'landed'))\n l.add_value('td_att', get_element_atk(td, 'attempt'))\n l.add_value('n_sub', n_sub)\n l.add_value('n_pass', n_pass)\n l.add_value('n_rev', n_rev)\n l.add_value('head_land', get_element_atk(head, 'landed'))\n l.add_value('head_att', get_element_atk(head, 'attempt'))\n l.add_value('body_land', get_element_atk(body, 'landed'))\n l.add_value('body_att', get_element_atk(body, 'attempt'))\n l.add_value('leg_land', get_element_atk(leg, 'landed'))\n l.add_value('leg_att', get_element_atk(leg, 'attempt'))\n l.add_value('distance_land', get_element_atk(distance, 'landed'))\n l.add_value('distance_att', get_element_atk(distance, 'attempt'))\n l.add_value('clinch_land', get_element_atk(clinch, 'landed'))\n l.add_value('clinch_att', get_element_atk(clinch, 'attempt'))\n l.add_value('ground_land', get_element_atk(ground, 'landed'))\n l.add_value('ground_att', get_element_atk(ground, 'attempt'))\n l.add_value('sig_str_abs', get_element_dmg(sig_str, 'absorbed'))\n l.add_value('sig_str_def', get_element_dmg(sig_str, 'defended'))\n l.add_value('total_str_abs', get_element_dmg(total_str, 'absorbed'))\n l.add_value('total_str_def', get_element_dmg(total_str, 'defended'))\n l.add_value('td_abs', get_element_dmg(td, 'absorbed'))\n l.add_value('td_def', get_element_dmg(td, 'defended'))\n l.add_value('head_abs', get_element_dmg(head, 'absorbed'))\n l.add_value('head_def', get_element_dmg(head, 'defended'))\n l.add_value('body_abs', get_element_dmg(body, 'absorbed'))\n l.add_value('body_def', get_element_dmg(body, 'defended'))\n l.add_value('leg_abs', get_element_dmg(leg, 'absorbed'))\n l.add_value('leg_def', get_element_dmg(leg, 'defended'))\n l.add_value('distance_abs', get_element_dmg(distance, 'absorbed'))\n l.add_value('distance_def', get_element_dmg(distance, 'defended'))\n l.add_value('clinch_abs', get_element_dmg(clinch, 'absorbed'))\n l.add_value('clinch_def', get_element_dmg(clinch, 'defended'))\n l.add_value('ground_abs', get_element_dmg(ground, 'absorbed'))\n l.add_value('ground_def', get_element_dmg(ground, 'defended'))\n\n yield l.load_item()\n\n\nclass FightersSpider(scrapy.Spider):\n name = 'ufcFighters'\n start_urls = ['http://ufcstats.com/statistics/fighters']\n\n custom_settings = {\n 'FEED_FORMAT': 'csv',\n 'FEED_URI': 'data/fighter_stats/%(time)s.csv'\n }\n\n def parse(self, response):\n \"\"\"\n Parse the fighter listing page, follow 
link to each alphabetical page\n \"\"\"\n\n by_alphabets = response.css(\n '.b-statistics__nav-link ::attr(href)').getall()\n\n pages_by_alphabets = []\n for alphabet in by_alphabets:\n link = alphabet + '&page=all'\n pages_by_alphabets.append(link)\n\n for page in pages_by_alphabets:\n yield response.follow(page, callback=self.parse_fighter_link)\n\n def parse_fighter_link(self, response):\n \"\"\"\n Parse each alphabetical listing, find links to each fighter\n \"\"\"\n\n rows = response.css('tbody .b-statistics__table-row')\n rows.pop(0)\n\n for row in rows:\n fighter_link = row.css('.b-statistics__table-col ::attr(href)').get()\n yield response.follow(fighter_link, callback=self.parse_fighter_stat)\n\n def parse_fighter_stat(self, response):\n \"\"\"\n Parse fighter summary stats\n \"\"\"\n fighter_id = response.url.split('/')[-1]\n name = response.css('.b-content__title-highlight ::text').get()\n\n record = response.css('.b-content__title-record ::text').get()\n record = re.findall(r'[0-9]+', record)\n\n stat_box = response.css('.b-list__box-list')\n stat_box_1 = stat_box[0].css('.b-list__box-list-item')\n stat_box_2 = stat_box[1].css('.b-list__box-list-item')\n stat_box_3 = stat_box[2].css('.b-list__box-list-item')\n\n height = stat_box_1[0].css('li::text').getall()\n weight = stat_box_1[1].css('li::text').getall()\n reach = stat_box_1[2].css('li::text').getall()\n stance = stat_box_1[3].css('li::text').getall()\n dob = stat_box_1[4].css('li::text').getall()\n\n sig_str_land_pM = stat_box_2[0].css('li::text').getall()\n sig_str_land_pct = stat_box_2[1].css('li::text').getall()\n sig_str_abs_pM = stat_box_2[2].css('li::text').getall()\n sig_str_def_pct = stat_box_2[3].css('li::text').getall()\n td_avg = stat_box_3[1].css('li::text').getall()\n td_land_pct = stat_box_3[2].css('li::text').getall()\n td_def_pct = stat_box_3[3].css('li::text').getall()\n sub_avg = stat_box_3[4].css('li::text').getall()\n\n l = ItemLoader(item=FighterSummaryItem(), response=response)\n l.add_value('fighter_id', fighter_id)\n l.add_value('name', name.strip())\n l.add_value('height', height[1].strip())\n l.add_value('weight', weight[1].strip())\n l.add_value('reach', reach[1].strip())\n l.add_value('stance', stance[1].strip())\n l.add_value('dob', dob[1].strip())\n l.add_value('n_win', record[0])\n l.add_value('n_loss', record[1])\n l.add_value('n_draw', record[2])\n l.add_value('sig_str_land_pM', sig_str_land_pM[1].strip())\n l.add_value('sig_str_land_pct', sig_str_land_pct[1].strip())\n l.add_value('sig_str_abs_pM', sig_str_abs_pM[1].strip())\n l.add_value('sig_str_def_pct', sig_str_def_pct[1].strip())\n l.add_value('td_avg', td_avg[1].strip())\n l.add_value('td_land_pct', td_land_pct[1].strip())\n l.add_value('td_def_pct', td_def_pct[1].strip())\n l.add_value('sub_avg', sub_avg[1].strip())\n\n yield l.load_item()\n\n\nclass UpcomingFightsSpider(scrapy.Spider):\n name = 'upcoming'\n start_urls = ['http://ufcstats.com/statistics/events/completed']\n time_created = print_time('now')\n\n custom_settings = {\n 'FEED_FORMAT': 'csv', \n 'FEED_URI': f'data/upcoming/{time_created}.csv'\n }\n\n def parse(self, response):\n \"\"\"\n Parse the event listing page, follow link to individual events page\n \"\"\"\n\n event_url = response.css(\n 'tbody .b-statistics__table-row_type_first ::attr(href)').get()\n\n yield response.follow(event_url, callback=self.parse_upcoming_event)\n\n def parse_upcoming_event(self, response):\n \"\"\"\n Parse the event page, follow link to each individual fight page\n \"\"\"\n\n event_info 
= response.css('.b-list__box-list-item')\n date = event_info[0].css('::text').getall()[-1]\n location = event_info[1].css('::text').getall()[-1]\n fights_url = response.css(\n '.b-fight-details__table-row ::attr(data-link)')\n\n for fight in fights_url:\n yield response.follow(fight,\n callback=self.parse_upcoming_fight,\n cb_kwargs=dict(date=date, location=location))\n\n def parse_upcoming_fight(self, response, date, location):\n \"\"\"\n Parse fight info - fight level summary, and fighter stats\n \"\"\"\n\n ##### Fight summary ######\n fight_id = response.url.split('/')[-1]\n # date and location carry over from events page\n date = date.strip()\n location = location.strip()\n\n # Fighter names\n names = response.css(\n '.b-fight-details__person-name :not(p)::text').getall()\n try:\n fighter_1 = names[0].strip()\n fighter_2 = names[1].strip()\n except:\n fighter_1 = None\n fighter_2 = None\n\n # IDs - Handle errors due to missing fighter link\n ids = response.css('.b-fight-details__person-name')\n fighter_1_id = ids[0].css('::attr(href)').get()\n fighter_2_id = ids[1].css('::attr(href)').get()\n\n if fighter_1_id is not None:\n fighter_1_id = fighter_1_id.split('/')[-1]\n if fighter_2_id is not None:\n fighter_2_id = fighter_2_id.split('/')[-1]\n\n weight_class = response.css(\n '.b-fight-details__fight-title ::text').getall()\n\n if len(weight_class) > 1:\n weight_class = weight_class[-1].strip()\n if len(weight_class) == 1:\n weight_class = weight_class[0].strip()\n\n l = ItemLoader(item=UpcomingFightsItem(), response=response)\n l.add_value('fight_id', fight_id)\n l.add_value('date', date)\n l.add_value('location', location)\n l.add_value('fighter_1', fighter_1)\n l.add_value('fighter_1_id', fighter_1_id)\n l.add_value('fighter_2', fighter_2)\n l.add_value('fighter_2_id', fighter_2_id)\n l.add_value('weight_class', weight_class)\n\n yield l.load_item()" }, { "alpha_fraction": 0.6440678238868713, "alphanum_fraction": 0.6453715562820435, "avg_line_length": 28.5, "blob_id": "6743235090da407aca4ec78443644fdaf554694b", "content_id": "c49bc1ec9bd1054f622180498879a7a2020c330b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 767, "license_type": "permissive", "max_line_length": 116, "num_lines": 26, "path": "/Makefile", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "NAME := ufc-stats-crawler\nTAG := $(shell git log -1 --pretty=%h)\nNAMESPACE := tedostrem/ufc-stats-crawler\nDATA_DIR := ${PWD}/data\n\n.PHONY : build push\n\nall : build push\n\nbuild :\n\tdocker build -t ${NAMESPACE}/${NAME}:${TAG} -t ${NAMESPACE}/${NAME}:latest .\n\nbash :\n\tdocker run -it -v /app/data/:${DATA_DIR} ${NAMESPACE}/${NAME}:${TAG} bash\n\nufcFights :\n\tdocker run -it -v /app/data/:${DATA_DIR} ${NAMESPACE}/${NAME}:${TAG} scrapy crawl -L DEBUG -o - -t json ufcFights\n\nufcFighters :\n\tdocker run -it -v /app/data/:${DATA_DIR} ${NAMESPACE}/${NAME}:${TAG} scrapy crawl -L DEBUG -o - -t json ufcFighters\n\nupcoming :\n\tdocker run -it -v /app/data/:${DATA_DIR} ${NAMESPACE}/${NAME}:${TAG} scrapy crawl -L DEBUG -o - -t json upcoming\n\npush :\n\tdocker push ${NAMESPACE}/${NAME}\n" }, { "alpha_fraction": 0.5924018025398254, "alphanum_fraction": 0.5940116047859192, "avg_line_length": 24.252033233642578, "blob_id": "e2c45a840e44910e3285a9741e94af3bbc19a356", "content_id": "260551495d85e36b264a70eb4356b86434a3d81f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
3106, "license_type": "permissive", "max_line_length": 73, "num_lines": 123, "path": "/ufcStats/pipelines.py", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom scrapy.exporters import JsonLinesItemExporter, CsvItemExporter\nimport pathlib\n\nfrom ufcStats.utils import print_time\n\nfields_fight_info = [\n 'fight_id', 'fighter_1', 'fighter_1_id', 'fighter_2', 'fighter_2_id',\n 'winner', 'decision_method', 'fight_duration_lastrnd',\n 'fight_duration_lastrnd_time', 'time_format', 'weight_class', 'date',\n 'location'\n]\n\nfields_fight_stats = [\n 'fight_id',\n 'fighter_id',\n 'fighter_name',\n 'fighter_status',\n 'kd',\n 'n_pass',\n 'n_rev',\n 'n_sub',\n 'sig_str_abs',\n 'sig_str_att',\n 'sig_str_def',\n 'sig_str_land',\n 'total_str_abs',\n 'total_str_att',\n 'total_str_def',\n 'total_str_land',\n 'td_abs',\n 'td_att',\n 'td_def',\n 'td_land',\n 'head_abs',\n 'head_att',\n 'head_def',\n 'head_land',\n 'body_abs',\n 'body_att',\n 'body_def',\n 'body_land',\n 'leg_abs',\n 'leg_att',\n 'leg_def',\n 'leg_land',\n 'distance_abs',\n 'distance_att',\n 'distance_def',\n 'distance_land',\n 'clinch_abs',\n 'clinch_att',\n 'clinch_def',\n 'clinch_land',\n 'ground_abs',\n 'ground_att',\n 'ground_def',\n 'ground_land',\n]\n\n\nclass FightSummaryPipeline(object):\n \"\"\"\n Save Fight level summary to csv file\n \"\"\"\n def __init__(self):\n self.files = {}\n\n def open_spider(self, spider):\n time_created = print_time('now')\n # Create directory\n path_fight_info = f'data/fight_info'\n pathlib.Path(path_fight_info).mkdir(parents=True, exist_ok=True)\n # Write to folder\n file = open(f'{path_fight_info}/{time_created}.csv', 'wb')\n self.files[spider] = file\n self.exporter = CsvItemExporter(file)\n self.exporter.fields_to_export = fields_fight_info\n self.exporter.start_exporting()\n\n def close_spider(self, spider):\n self.exporter.finish_exporting()\n file = self.files.pop(spider)\n file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n\n\nclass FightStatsPipeline(object):\n \"\"\"\n Save Fight stats to jl file\n \"\"\"\n def __init__(self):\n self.files = {}\n\n def open_spider(self, spider):\n time_created = print_time('now')\n # Create directory\n path_fight_stats = f'data/fight_stats'\n pathlib.Path(path_fight_stats).mkdir(parents=True, exist_ok=True)\n # Write to folder\n file = open(f'{path_fight_stats}/{time_created}.jl', 'wb')\n self.files[spider] = file\n self.exporter = JsonLinesItemExporter(file)\n self.exporter.fields_to_export = fields_fight_stats\n self.exporter.start_exporting()\n\n def close_spider(self, spider):\n self.exporter.finish_exporting()\n file = self.files.pop(spider)\n file.close()\n\n def process_item(self, item, spider):\n self.exporter.export_item(item)\n return item\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7699999809265137, "avg_line_length": 19, "blob_id": "7199044ab0a2ca8263e541da89cb6abeac92b007", "content_id": "86c3085ad3e82b7714da5e480c649070a313fb0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 100, "license_type": "permissive", "max_line_length": 35, "num_lines": 5, "path": "/Dockerfile", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "FROM python:3\nWORKDIR 
/app\nADD requirements.txt /app\nRUN pip install -r requirements.txt\nADD . /app\n" }, { "alpha_fraction": 0.6884955763816833, "alphanum_fraction": 0.691150426864624, "avg_line_length": 37.965518951416016, "blob_id": "79ac6623e3264f3266f3c7a615d58a46d76baf15", "content_id": "71b4ed7af0cc1d46dca0ffbdce70c27c83217bbe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4520, "license_type": "permissive", "max_line_length": 83, "num_lines": 116, "path": "/ufcStats/items.py", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "# -*coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://docs.scrapy.org/en/latest/topics/items.html\n\nimport scrapy\nfrom scrapy.loader.processors import Identity, TakeFirst, Compose, MapCompose, Join\n\nSTR_toInt = Compose(TakeFirst(), int)\nSTR_toFloat = Compose(TakeFirst(), float)\n\n\ndef stripPercent(str_input):\n number = str_input.strip('%')\n return float(number) / 100\n\n\nclass FightsItem(scrapy.Item):\n fight_id = scrapy.Field(output_processor=TakeFirst())\n ## Fight summary, yields to csv\n date = scrapy.Field(output_processor=TakeFirst())\n location = scrapy.Field(output_processor=TakeFirst())\n fighter_1 = scrapy.Field(output_processor=TakeFirst())\n fighter_1_id = scrapy.Field(output_processor=TakeFirst())\n fighter_2 = scrapy.Field(output_processor=TakeFirst())\n fighter_2_id = scrapy.Field(output_processor=TakeFirst())\n winner = scrapy.Field(output_processor=TakeFirst())\n weight_class = scrapy.Field(output_processor=TakeFirst())\n decision_method = scrapy.Field(output_processor=TakeFirst())\n time_format = scrapy.Field(output_processor=TakeFirst())\n fight_duration_lastrnd = scrapy.Field(output_processor=STR_toInt)\n fight_duration_lastrnd_time = scrapy.Field(output_processor=TakeFirst())\n ## Fighter stats, yields to json\n fighter_id = scrapy.Field()\n fighter_name = scrapy.Field()\n fighter_status = scrapy.Field()\n kd = scrapy.Field()\n sig_str_land = scrapy.Field()\n sig_str_att = scrapy.Field()\n total_str_land = scrapy.Field()\n total_str_att = scrapy.Field()\n td_land = scrapy.Field()\n td_att = scrapy.Field()\n n_sub = scrapy.Field()\n n_pass = scrapy.Field()\n n_rev = scrapy.Field()\n head_land = scrapy.Field()\n head_att = scrapy.Field()\n body_land = scrapy.Field()\n body_att = scrapy.Field()\n leg_land = scrapy.Field()\n leg_att = scrapy.Field()\n distance_land = scrapy.Field()\n distance_att = scrapy.Field()\n clinch_land = scrapy.Field()\n clinch_att = scrapy.Field()\n ground_land = scrapy.Field()\n ground_att = scrapy.Field()\n sig_str_abs = scrapy.Field()\n sig_str_def = scrapy.Field()\n total_str_abs = scrapy.Field()\n total_str_def = scrapy.Field()\n td_abs = scrapy.Field()\n td_def = scrapy.Field()\n head_abs = scrapy.Field()\n head_def = scrapy.Field()\n body_abs = scrapy.Field()\n body_def = scrapy.Field()\n leg_abs = scrapy.Field()\n leg_def = scrapy.Field()\n distance_abs = scrapy.Field()\n distance_def = scrapy.Field()\n clinch_abs = scrapy.Field()\n clinch_def = scrapy.Field()\n ground_abs = scrapy.Field()\n ground_def = scrapy.Field()\n\n\nclass UpcomingFightsItem(scrapy.Item):\n fight_id = scrapy.Field(output_processor=TakeFirst())\n date = scrapy.Field(output_processor=TakeFirst())\n location = scrapy.Field(output_processor=TakeFirst())\n fighter_1 = scrapy.Field(output_processor=TakeFirst())\n fighter_1_id = scrapy.Field(output_processor=TakeFirst())\n fighter_2 = 
scrapy.Field(output_processor=TakeFirst())\n fighter_2_id = scrapy.Field(output_processor=TakeFirst())\n weight_class = scrapy.Field(output_processor=TakeFirst())\n\n\nclass FighterSummaryItem(scrapy.Item):\n # define the fields for your item here like:\n fighter_id = scrapy.Field(output_processor=TakeFirst())\n name = scrapy.Field(output_processor=TakeFirst())\n height = scrapy.Field(output_processor=TakeFirst())\n weight = scrapy.Field(output_processor=TakeFirst())\n reach = scrapy.Field(output_processor=TakeFirst())\n stance = scrapy.Field(output_processor=TakeFirst())\n dob = scrapy.Field(output_processor=TakeFirst())\n #active = scrapy.Field(output_processor=TakeFirst())\n n_win = scrapy.Field(output_processor=STR_toInt)\n n_loss = scrapy.Field(output_processor=STR_toInt)\n n_draw = scrapy.Field(output_processor=STR_toInt)\n sig_str_land_pM = scrapy.Field(output_processor=STR_toFloat)\n sig_str_land_pct = scrapy.Field(\n output_processor=Compose(TakeFirst(), stripPercent))\n sig_str_abs_pM = scrapy.Field(output_processor=STR_toFloat)\n sig_str_def_pct = scrapy.Field(\n output_processor=Compose(TakeFirst(), stripPercent))\n td_avg = scrapy.Field(output_processor=STR_toFloat)\n td_land_pct = scrapy.Field(\n output_processor=Compose(TakeFirst(), stripPercent))\n td_def_pct = scrapy.Field(\n output_processor=Compose(TakeFirst(), stripPercent))\n sub_avg = scrapy.Field(output_processor=STR_toFloat)\n" }, { "alpha_fraction": 0.5161290168762207, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 15, "blob_id": "2532a1064eff7d1976f215f65d663a101b17f3cd", "content_id": "d1c294da212a4478af70fa4038ba3f67750d945d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 31, "license_type": "permissive", "max_line_length": 17, "num_lines": 2, "path": "/requirements.txt", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "dateparser==0.7.2\nScrapy==1.7.4" }, { "alpha_fraction": 0.7024579644203186, "alphanum_fraction": 0.7031047940254211, "avg_line_length": 32.35555648803711, "blob_id": "678aa165e75739dfcb74e8a78ea8e7d4c5773188", "content_id": "e9e1cc718e41f55fc3c24e6a61ba3decd9c18da7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3092, "license_type": "permissive", "max_line_length": 201, "num_lines": 90, "path": "/README.md", "repo_name": "fanghuiz/ufc-stats-crawler", "src_encoding": "UTF-8", "text": "# UFC Stats Crawler\r\n\r\n## Table of Contents\r\n\r\n- [About](#about)\r\n- [Building and Running with Docker](#docker)\r\n- [Getting Started](#getting_started)\r\n- [Usage](#usage)\r\n- [TODO](#to_do)\r\n\r\n## About <a name = \"about\"></a>\r\n\r\nThis is a web scraper to get data from [UFC Stats](http://ufcstats.com/), built using [Scrapy](https://github.com/scrapy/scrapy). 
Scraped data are organized as follows:\r\n\r\nAll completed UFC fights:\r\n\r\n- `fight_info` <a name = \"fight_info\"></a> table, contains fight/match-up level meta-data.\r\n- `fighter_stats`<a name = \"fighter_stats\"></a> table, contains fighter level data of fighters' career summary statistics.\r\n- `fight_stats` <a name=\"fight_stats\"></a> contains fighter-level performance data within each match-up.\r\n\r\nUpcoming fights:\r\n\r\n- `upcoming`<a name = \"upcoming\"></a> table contains match-up level information of all the upcoming fights in the next UFC event, according to this page http://ufcstats.com/statistics/events/completed.\r\n\r\nLet me know if you've used the crawler or data to make something cool :wave:\r\n\r\n## Building and Running with Docker <a name = \"docker\"></a>\r\nLogs will be written to standard output in json format.\r\n\r\n```\r\nmake build # Builds the docker container\r\nmake ufcFights # Run the ufcFights crawler\r\nmake ufcFighters # Run the ufcFighters crawler\r\nmake upcoming # Run the upcoming crawler\r\n```\r\n\r\n## Getting Started <a name = \"getting_started\"></a>\r\n\r\n### Prerequisites\r\n* Python 3\r\n* Scrapy\r\n\r\nInstall required packages.\r\n\r\n```\r\npip install -r requirements.txt\r\n```\r\n\r\nIf you have trouble installing Scrapy, see the install section in Scrapy documentation at https://docs.scrapy.org/en/latest/intro/install.html for more details.\r\n\r\n### Installing\r\n\r\nClone or fork the repo. Or download a local copy. Then crawl away.\r\n\r\n## Usage <a name = \"usage\"></a>\r\n\r\n_Note: in the current version, running the spider will crawl the entire site, so it will take some time._\r\n\r\nCall `scrapy crawl spider_name` to start the crawler. There are 3 spiders you can run:\r\n\r\n```\r\nscrapy crawl ufcFights\r\n```\r\n\r\nThe `ufcFights` spider will return\r\n\r\n- [`fight_info`](#fight_info) table as a `.csv` file saved in `data/fight_info` directory.\r\n- [`fight_stats`](#fight_stats) table as `.jl` file (newline-delimited JSON) saved in `data/fight_stats` directory. One line per fight.\r\n\r\n*If you prefer other output formats, you can modify the respective feed exports pipelines in `pipelines.py`. Or file an issue and let me know.*\r\n\r\n\r\n```\r\nscrapy crawl ufcFighters\r\n```\r\n\r\nThe `ufcFighters` spider will return the [`fighter_stats`](#fighter_stats) table as a `.csv` file saved in `data/fighter_stats` directory.\r\n\r\n```\r\nscrapy crawl upcoming\r\n```\r\n\r\nThe `upcoming` spider will return [`upcoming`](#upcoming) table as a `.csv` file, saved in `data/upcoming` directory.\r\n\r\nAll output files use timestamp as file names, stored in different folders.\r\n\r\n## TODO <a name = \"to_do\"></a>\r\n\r\n- [x] Add a spider to scrape upcoming fights\r\n- [ ] Add options to limit the spider's scope, e.g. only scrape the new matches rather than the entire site.\r\n" } ]
8
tupic98/Puertas
https://github.com/tupic98/Puertas
c950df9c3170cc0cccbcd691e2a4483be63fd4c0
82508ec6843d33e9b81b37dce368562c76a6e5f4
ba439fc153963bc3c41083e01697e3bfed148d4f
refs/heads/master
2020-03-21T19:54:30.587690
2018-06-28T12:45:06
2018-06-28T12:45:06
138,977,074
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.45661866664886475, "alphanum_fraction": 0.48468679189682007, "avg_line_length": 39.23823547363281, "blob_id": "eeaea66a31e65d97d1abd008b34cb49586d86e34", "content_id": "1dbcdeefea41216b2288c805c2f487f04048d086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13699, "license_type": "no_license", "max_line_length": 128, "num_lines": 340, "path": "/Puertas.py", "repo_name": "tupic98/Puertas", "src_encoding": "UTF-8", "text": "import winsound\nfrom random import randint\nfrom time import sleep\n\nimport wx\n\nganarcambio=0\nganarsincambio=0\nperdercambio=0\nperdersincambio=0\nabierta=0\nactual=0\notra=0\npremio=0\nturno = False\n\nclass MiFrame(wx.Frame):\n def __init__(self,*args,**kwargs):\n global turno\n wx.Frame.__init__(self,*args,**kwargs)\n self.Show()\n blanco = wx.Colour(255, 255, 255)\n self.SetBackgroundColour(blanco)\n panel = wx.Panel(self, -1, pos=(0, 0), size=(800, 600))\n panel.SetBackgroundColour(blanco)\n\n cabra1 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(88, 35), size=(125, 200))\n\n cabra2 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(340, 35), size=(125, 200))\n\n cabra3 = wx.StaticBitmap(self, -1, wx.Bitmap('cabra.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(585, 35), size=(125, 200))\n\n carro1 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(0, 35), size=(200, 200))\n\n carro2 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(252, 35), size=(200, 200))\n\n carro3 = wx.StaticBitmap(self, -1, wx.Bitmap('carro.png', wx.BITMAP_TYPE_ANY),\n pos=wx.Point(497, 35), size=(200, 200))\n # Puertas\n puerta1 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),\n wx.Point(88, 35), wx.Size(120, 200), 0)\n\n puerta2 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),\n wx.Point(340, 35), wx.Size(120, 200), 0)\n\n puerta3 = wx.BitmapButton(panel, -1, wx.Bitmap('puerta.png', wx.BITMAP_TYPE_ANY),\n wx.Point(585, 35), wx.Size(120, 200), 0)\n\n change = wx.BitmapButton(panel, -1, wx.Bitmap('checked.png', wx.BITMAP_TYPE_ANY),\n wx.Point(290, 340), wx.Size(64, 64), 0)\n\n do_not_change = wx.BitmapButton(panel, -1, wx.Bitmap('cross.png', wx.BITMAP_TYPE_ANY),\n wx.Point(440, 340), wx.Size(64, 64), 0)\n\n reset = wx.BitmapButton(panel, -1, wx.Bitmap('refreshing1.png', wx.BITMAP_TYPE_ANY),\n wx.Point(368, 340), wx.Size(64, 64), 0)\n reset.SetBackgroundColour((0,0,0))\n texto = wx.StaticText(self, id=-1, label=\"Elige una puerta para iniciar el juego. 
\", pos=(200, 265),\n size=(400, 50), style=wx.ALIGN_CENTRE)\n font = wx.Font(15, wx.DECORATIVE, wx.NORMAL, wx.BOLD)\n texto.SetFont(font)\n\n Ganadas = wx.StaticText(self, id=-1, label=(\"Partidas ganadas cambiando puerta: %d\" % ganarcambio), pos=(30, 475),\n size=(250, 50), style=wx.ALIGN_LEFT)\n\n Perdidas = wx.StaticText(self, id=-1, label=\"Partidas perdidas cambiando puerta: %d\" % perdercambio, pos=(550, 475),\n size=(250, 50), style=wx.ALIGN_LEFT)\n\n Ganadas2 = wx.StaticText(self, id=-1, label=\"Partidas ganadas sin cambio de puerta: %d\" % ganarsincambio, pos=(30, 525),\n size=(250, 50), style=wx.ALIGN_LEFT)\n\n Perdidas2 = wx.StaticText(self, id=-1, label=\"Partidas perdidas sin cambio de puerta: %d\" % perdersincambio,\n pos=(550, 525), size=(250, 50), style=wx.ALIGN_LEFT)\n\n change.Hide()\n do_not_change.Hide()\n reset.Hide()\n cabra1.Hide()\n cabra2.Hide()\n cabra3.Hide()\n carro1.Hide()\n carro2.Hide()\n carro3.Hide()\n \n def reset_listener(self):\n global abierta;\n global actual\n global otra\n global premio\n global turno\n\n reset.Hide()\n cabra1.Hide()\n cabra2.Hide()\n cabra3.Hide()\n carro1.Hide()\n carro2.Hide()\n carro3.Hide()\n puerta1.Show()\n puerta1.Enable(True)\n puerta2.Show()\n puerta2.Enable(True)\n puerta3.Show()\n puerta3.Enable(True)\n turno = False\n\n texto.SetLabel(\"Elige una puerta para empezar a jugar:\")\n\n abierta=0\n actual=0\n otra=0\n premio=0\n\n def change_door(self):\n global perdercambio\n global ganarcambio\n global imagen\n global actual\n global premio\n global abierta\n\n change.Hide()\n do_not_change.Hide()\n\n duplicado = actual\n\n if duplicado==1:\n puerta1.Enable(True)\n if duplicado==2:\n puerta2.Enable(True)\n if duplicado==3:\n puerta3.Enable(True)\n actual = randint(1, 3)\n while actual == abierta or actual == duplicado:\n actual=randint(1, 3)\n if actual==1:\n puerta1.Enable(False)\n \n if actual==2:\n puerta2.Enable(False)\n if actual==3:\n puerta3.Enable(False)\n \n texto.SetLabel(\"Cambiaste la puerta %d por la puerta %d\" % (duplicado, actual))\n sleep(2.2)\n winsound.PlaySound(\"door.wav\", winsound.SND_ASYNC | winsound.SND_ALIAS)\n print(\"El actual es %d\" % actual)\n print(\"El premio es %d\" % premio)\n print (\"El abierta es %d\" % abierta)\n \n if actual==1:\n puerta1.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro1.Show()\n ganarcambio= ganarcambio + 1\n Ganadas.SetLabel(\"Partidas ganadas cambiando puerta: %d\" % ganarcambio)\n \n else:\n texto.SetLabel(\"Perdiste... Elegista la puerta de la cabra\")\n cabra1.Show()\n perdercambio= perdercambio + 1\n Perdidas.SetLabel(\"Partidas perdidas cambiando puerta: %d\" % perdercambio)\n if actual==2:\n puerta2.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro2.Show()\n ganarcambio= ganarcambio + 1\n Ganadas.SetLabel(\"Partidas ganadas cambiando puerta: %d\" % ganarcambio)\n \n else:\n texto.SetLabel(\"Perdiste... Elegista la puerta de la cabra\")\n cabra2.Show()\n perdercambio= perdercambio + 1\n Perdidas.SetLabel(\"Partidas perdidas cambiando puerta: %d\" % perdercambio)\n if actual==3:\n puerta3.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro3.Show()\n ganarcambio= ganarcambio + 1\n Ganadas.SetLabel(\"Partidas ganadas cambiando puerta: %d\" % ganarcambio)\n else:\n texto.SetLabel(\"Perdiste... 
Elegista la puerta de la cabra\")\n cabra3.Show()\n perdercambio= perdercambio + 1\n Perdidas.SetLabel(\"Partidas perdidas cambiando puerta: %d\" % perdercambio)\n reset.Show()\n\n def do_not_change_door(self):\n global perdersincambio\n global ganarsincambio\n change.Hide()\n do_not_change.Hide()\n texto.SetLabel(\"Elegiste quedarte con la puerta %d.\" % actual)\n print(\"Nueva partida\")\n print(\"El actual es %d\" % actual)\n print(\"El premio es %d\" % premio)\n print (\"El abierta es %d\" % abierta)\n sleep(2.2)\n if actual==1:\n puerta1.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro1.Show()\n ganarsincambio= ganarsincambio + 1\n Ganadas2.SetLabel(\"Partidas ganadas sin cambio de puerta: %d\" % ganarsincambio)\n \n else:\n texto.SetLabel(\"Perdiste... Elegista la puerta de la cabra\")\n cabra1.Show()\n perdersincambio= perdersincambio + 1\n Perdidas2.SetLabel(\"Partidas perdidas sin cambio de puerta: %d\" % perdersincambio)\n if actual==2:\n puerta2.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro2.Show()\n ganarsincambio= ganarsincambio + 1\n Ganadas2.SetLabel(\"Partidas ganadas sin cambio de puerta: %d\" % ganarsincambio)\n \n else:\n texto.SetLabel(\"Perdiste... Elegista la puerta de la cabra\")\n cabra2.Show()\n perdersincambio= perdersincambio + 1\n Perdidas2.SetLabel(\"Partidas perdidas sin cambio de puerta: %d\" % perdersincambio)\n \n if actual==3:\n puerta3.Hide();\n if actual==premio:\n texto.SetLabel(\"¡Buena opción! Encontraste el premio.\")\n carro3.Show()\n ganarsincambio= ganarsincambio + 1\n Ganadas2.SetLabel(\"Partidas ganadas sin cambio de puerta: %d\" % ganarsincambio)\n else:\n texto.SetLabel(\"Perdiste... Elegista la puerta de la cabra\")\n cabra3.Show()\n perdersincambio= perdersincambio + 1\n Perdidas2.SetLabel(\"Partidas perdidas sin cambio de puerta: %d\" % perdersincambio)\n reset.Show()\n \n \n def timeout(abierta):\n texto.SetLabel(\"Observa que en la puerta %d esta la cabra.\" % abierta)\n sleep(2.2)\n timeout2(abierta)\n\n def timeout2(abierta):\n global actual\n global premio\n if actual == 1:\n texto.SetLabel(\"¿Deseas cambiar de puerta?\")\n if actual == 2:\n texto.SetLabel(\"¿Deseas cambiar de puerta?\")\n if actual == 3:\n texto.SetLabel(\"¿Deseas cambiar de puerta?\")\n winsound.PlaySound(\"door.wav\", winsound.SND_ASYNC | winsound.SND_ALIAS)\n if abierta==1:\n puerta1.Hide();\n cabra1.Show()\n if abierta==2:\n puerta2.Hide();\n cabra2.Show()\n if abierta==3:\n puerta3.Hide();\n cabra3.Show()\n change.Show()\n do_not_change.Show()\n print(\"El actual es %d\" % actual)\n print(\"El premio es %d\" % premio)\n print(\"La abierta es %d\" % abierta)\n\n def puertaA(usuario):\n global premio\n premio=randint(1, 3)\n global abierta\n abierta=randint(1, 3)\n while abierta==premio or abierta==usuario:\n abierta=randint(1, 3)\n return abierta\n \n def onpuerta1(self):\n global turno\n global actual\n global abierta\n turno = True\n actual=1\n abierta=puertaA(actual)\n texto.SetLabel(\"Elegista la puerta 1\")\n puerta1.Disable()\n sleep(2.2)\n timeout(abierta)\n print(u\"Has presionado el botón 1\")\n\n def onpuerta2(self):\n global actual\n global abierta\n global turno\n turno = True\n actual=2\n abierta=puertaA(actual)\n texto.SetLabel(\"Elegiste la puerta 2\")\n puerta2.Disable()\n sleep(2.2)\n timeout(abierta)\n print(u\"Has presionado el botón 2\")\n\n def onpuerta3(self):\n global actual\n global abierta\n global turno\n turno = True\n actual=3\n 
abierta=puertaA(actual)\n texto.SetLabel(\"Elegista la puerta 3\")\n puerta3.Disable()\n sleep(2.2)\n timeout(abierta)\n print (u\"Has presionado el botón 3\")\n\n if turno is False:\n puerta1.Bind(wx.EVT_BUTTON, onpuerta1)\n puerta2.Bind(wx.EVT_BUTTON, onpuerta2)\n puerta3.Bind(wx.EVT_BUTTON, onpuerta3)\n\n change.Bind(wx.EVT_BUTTON, change_door)\n do_not_change.Bind(wx.EVT_BUTTON, do_not_change_door)\n reset.Bind(wx.EVT_BUTTON, reset_listener)\n\n\nif __name__ == '__main__':\n app = wx.App() \n fr = MiFrame(None, -1, \" Juego de las puertas\", size=(800,600),style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)\n app.MainLoop()\n" } ]
1
umluizlima/ac318-turma1-grupo1
https://github.com/umluizlima/ac318-turma1-grupo1
8f300eea9641b0bf76fc3ee5cb240f3fdf90b6e8
ac8b5ee04fada7f1c5fe1a4510ee1678b64ec66a
15b16da50b85da6ad8ac02ec05cb8c0726fb8531
refs/heads/master
2020-04-09T20:18:20.774323
2018-06-19T15:51:48
2018-06-19T15:51:48
124,242,601
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.6328871846199036, "alphanum_fraction": 0.6328871846199036, "avg_line_length": 29.764705657958984, "blob_id": "0d1f9edbba0f2a8cd5e6d09bd6472a64f2663f50", "content_id": "6c2e68ed34b37d59f733e8aaf7b730df987785e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/app/controller/main.py", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "from flask import (\n Blueprint, redirect, url_for, session, render_template, request, flash\n)\n\nfrom app.model import User\n\nbp = Blueprint('main', __name__, url_prefix='')\n\n\[email protected]('/', methods=[\"GET\", \"POST\"])\ndef index():\n if request.method == \"POST\":\n data = request.form.to_dict()\n if 'username' in data.keys():\n return redirect(url_for('user.profile', username=data['username']))\n flash('Nome de usuário inválido.')\n return render_template('main/index.html', title=\"Início\")\n" }, { "alpha_fraction": 0.5717977285385132, "alphanum_fraction": 0.5717977285385132, "avg_line_length": 28.129032135009766, "blob_id": "24463239efd988d5f0112f386d933181b85eb165", "content_id": "208d23074c239d656a79ce54219dcf50476daeb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2712, "license_type": "no_license", "max_line_length": 82, "num_lines": 93, "path": "/app/controller/auth.py", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "import functools\n\nfrom flask import (\n Blueprint, flash, g, redirect, render_template, request, session, url_for\n)\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom app.model import db, User, Telephone, Email\n\nbp = Blueprint('auth', __name__, url_prefix='')\n\n\[email protected]('/signup', methods=('GET', 'POST'))\ndef signup():\n if request.method == 'POST':\n error = None\n username = request.form['username']\n\n if User.query.filter_by(username=username).first() is not None:\n error = f'Usuário {username} já cadastrado.'\n\n if error is None:\n user = User(username=username,\n password=generate_password_hash(request.form['password']),\n first_name=request.form['firstname'],\n last_name=request.form['lastname'])\n db.session.add(user)\n user = User.query.filter_by(username=username).first()\n email = Email(tag=\"main\",\n email=request.form['email'],\n user_id=user.id)\n db.session.add(email)\n\n telephone = Telephone(tag=\"main\",\n telephone=request.form['telephone'],\n user_id=user.id)\n db.session.add(telephone)\n db.session.commit()\n return redirect(url_for('auth.login'))\n\n flash(error)\n\n return render_template('auth/signup.html', title='Cadastrar-se')\n\n\[email protected]('/login', methods=('GET', 'POST'))\ndef login():\n if request.method == 'POST':\n error = None\n username = request.form['username']\n password = request.form['password']\n user = User.query.filter_by(username=username).first()\n\n if user is None:\n error = 'Nome de usuário incorreto.'\n elif not check_password_hash(user.password, password):\n error = 'Senha incorreta.'\n\n if error is None:\n session.clear()\n session['user_id'] = user.id\n return redirect(url_for('main.index'))\n\n flash(error)\n\n return render_template('auth/login.html', title='Entrar')\n\n\[email protected]('/logout')\ndef logout():\n session.clear()\n return redirect(url_for('auth.login'))\n\n\ndef login_required(view):\n 
@functools.wraps(view)\n def wrapped_view(**kwargs):\n if g.user is None:\n return redirect(url_for('auth.login'))\n\n return view(**kwargs)\n\n return wrapped_view\n\n\[email protected]_app_request\ndef load_logged_in_user():\n user_id = session.get('user_id')\n\n if user_id is None:\n g.user = None\n else:\n g.user = User.query.filter_by(id=user_id).first()\n" }, { "alpha_fraction": 0.6068601608276367, "alphanum_fraction": 0.6081793904304504, "avg_line_length": 38.894737243652344, "blob_id": "0c668093d9be9b00c44c794594ff34fb957563e5", "content_id": "91fff413c9add7a7d1121b5098927fc27d60b20a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 128, "num_lines": 38, "path": "/app/templates/user/settings.html", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n{% block main %}\n<article>\n <header>\n <h2>Configurações</h2>\n </header>\n <main>\n <form method=\"POST\">\n <label for=\"firstname\">Nome</label>\n <input type=\"text\" name=\"first_name\" id=\"firstname\" value=\"{{ user['first_name'] }}\" required>\n\n <label for=\"lastname\">Sobrenome</label>\n <input type=\"text\" name=\"last_name\" id=\"lastname\" value=\"{{ user['last_name'] }}\" required>\n\n {% for email in user['emails'] %}\n <input type=\"number\" name=\"email_id\" value=\"{{ email['id'] }}\" hidden>\n <label>Email</label>\n <input class=\"form-control\" name=\"email\" id=\"email\" type=\"email\" placeholder=\"Email\"\n value=\"{{ email['email'] }}\" required=\"required\" data-validation-required-message=\"Please enter your name.\">\n {% endfor %}\n\n {% for phone in user['telephones'] %}\n <input type=\"number\" name=\"telephone_id\" value=\"{{ phone['id'] }}\" hidden>\n <label>Telefone</label>\n <input class=\"form-control\" name=\"telephone\" id=\"telephone\" type=\"text\" placeholder=\"Telefone\"\n value=\"{{ phone['telephone'] }}\" required=\"required\" data-validation-required-message=\"Please enter your phone number.\">\n {% endfor %}\n\n <button type=\"submit\" name=\"delete\" value=\"delete\">Deletar Conta</button>\n <button type=\"submit\" name=\"update\" value=\"update\">Salvar</button>\n </form>\n {% if message %}<p class=error>Message: {{ message }}</p>{% endif %}\n </main>\n <footer>\n\n </footer>\n</article>\n{% endblock %}\n" }, { "alpha_fraction": 0.5778651237487793, "alphanum_fraction": 0.5784699320793152, "avg_line_length": 33.44791793823242, "blob_id": "2638d5ff907fd656a4b8ee05c77bd1f74943542b", "content_id": "4a97dad155586012bc2c7da1aad67a546e2ad40e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 110, "num_lines": 96, "path": "/app/model.py", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "import os\nimport vobject\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\n\n\ndb = SQLAlchemy()\nmigrate = Migrate()\n\n\nclass User(db.Model):\n \"\"\"\n CREATE: db.session.add(item) -> db.session.commit()\n READ: Item.query.all() or Item.query.filter_by(key=value).first()\n UPDATE: item = Item.query.filter_by(key=value).first() -> item.key = value\\\n -> db.session.commit()\n DELETE: db.session.delete(item) -> db.session.commit()\n \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.Text, unique=True, nullable=False)\n password = 
db.Column(db.Text, nullable=False)\n first_name = db.Column(db.Text, nullable=False)\n last_name = db.Column(db.Text, nullable=False)\n emails = db.relationship('Email', cascade='delete', backref='user')\n telephones = db.relationship('Telephone', cascade='delete', backref='user')\n\n def to_dict(self):\n user = {'id': self.id,\n 'username': self.username,\n 'first_name': self.first_name,\n 'last_name': self.last_name,\n 'full_name': f'{self.first_name} {self.last_name}',\n 'emails': [email.to_dict() for email in Email.query.filter_by(user_id=self.id).all()],\n 'telephones': [phone.to_dict() for phone in Telephone.query.filter_by(user_id=self.id).all()]}\n return user\n\n def to_vcard(self):\n vcard = vobject.vCard()\n user = self.to_dict()\n\n name = vcard.add('fn')\n name.value = user['full_name']\n name = vcard.add('n')\n name.value = vobject.vcard.Name(family=user['last_name'],\n given=user['first_name'])\n\n for email in user['emails']:\n e = vcard.add('email')\n e.value = email['email']\n e.type_param = email['tag']\n\n for telephone in user['telephones']:\n t = vcard.add('tel')\n t.value = telephone['telephone']\n t.type_param = telephone['tag']\n\n filename = '_'.join([self.first_name, self.last_name, 'contact.vcf'])\n filepath = os.path.join(os.path.abspath(''), 'instance', 'vcf', filename)\n with open(filepath, 'w+') as f:\n f.write(vcard.serialize())\n return filename\n\n def __repr__(self):\n return f\"<User {self.username}>\"\n\n\nclass Email(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n tag = db.Column(db.Text, nullable=True)\n email = db.Column(db.Text, nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def to_dict(self):\n email = {'id': self.id,\n 'tag': self.tag,\n 'email': self.email}\n return email\n\n def __repr__(self):\n return f\"'<Email {self.email}>\"\n\n\nclass Telephone(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n tag = db.Column(db.Text, nullable=True)\n telephone = db.Column(db.String(15), nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n\n def to_dict(self):\n telephone = {'id': self.id,\n 'tag': self.tag,\n 'telephone': self.telephone}\n return telephone\n\n def __repr__(self):\n return f\"<Telephone {self.telephone}>\"\n" }, { "alpha_fraction": 0.7888349294662476, "alphanum_fraction": 0.7888349294662476, "avg_line_length": 58, "blob_id": "6f078e893a6f36cac7e4844a8deb8aea9805523f", "content_id": "746127de646d05181f3cd36013d78a09df5d4d28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 423, "license_type": "no_license", "max_line_length": 271, "num_lines": 7, "path": "/README.md", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "# Olá,\nEste projeto consiste de uma aplicação web para troca de arquivos de contatos entre pessoas. Ao invés de passar suas informações uma a uma para uma pessoa salvar em sua agenda, passe a url do seu usuário e a pessoa poderá baixar seu cartão virtual (vCard) para adicionar.\n\n## Funcionamento\nUma pessoa pode se cadastrar fornecendo seus dados (nome, email e telefone)e um nome de usuário único.\n\nContinua..." 
}, { "alpha_fraction": 0.592238187789917, "alphanum_fraction": 0.5938330888748169, "avg_line_length": 35.17307662963867, "blob_id": "d65acdd60931ab4d147a4bd09594e38e71db9303", "content_id": "3c32547c617f9562c7cc8d4dec3b2f92e72063ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1883, "license_type": "no_license", "max_line_length": 82, "num_lines": 52, "path": "/app/controller/user.py", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "from flask import (\n Blueprint, render_template, abort, send_from_directory, request, session,\n jsonify, current_app, redirect, url_for, flash\n)\n\nfrom app.model import db, User, Email, Telephone\nfrom .auth import login_required\n\nbp = Blueprint('user', __name__, url_prefix='')\n\n\[email protected](\"/<username>\")\ndef profile(username):\n user = User.query.filter_by(username=username).first()\n if user:\n return render_template('user/profile.html', user=user.to_dict(),\n title=user.username)\n flash('Nome de usuário inválido.')\n return redirect(url_for('main.index'))\n\n\[email protected](\"/settings\", methods=[\"GET\", \"POST\"])\n@login_required\ndef settings():\n user = User.query.filter_by(id=session.get('user_id')).first()\n if request.method == \"POST\":\n data = request.form.to_dict()\n if 'delete' in data:\n db.session.delete(user)\n db.session.commit()\n if 'update' in data:\n print('Tentou editar!!!')\n user.first_name = data['first_name']\n user.last_name = data['last_name']\n email = Email.query.filter_by(id=data['email_id']).first()\n email.email = data['email']\n telephone = Telephone.query.filter_by(id=data['telephone_id']).first()\n telephone.telephone = data['telephone']\n db.session.commit()\n return redirect(url_for('user.profile', username=user.username))\n return render_template('user/settings.html', user=user.to_dict(),\n title='Editar')\n\n\[email protected](\"/download/<username>\")\ndef download(username):\n user = User.query.filter_by(username=username).first()\n if user:\n return send_from_directory(current_app.config['VCARD_FOLDER'],\n user.to_vcard(),\n as_attachment=True)\n abort(404)\n" }, { "alpha_fraction": 0.6567656993865967, "alphanum_fraction": 0.6567656993865967, "avg_line_length": 19.200000762939453, "blob_id": "391f75ecda073c2b9635d2f62f8f15d3f071a8a0", "content_id": "6ed0fd61e6734cffe56fde7f726b7ca71db96c9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 57, "num_lines": 15, "path": "/app/controller/pwa.py", "repo_name": "umluizlima/ac318-turma1-grupo1", "src_encoding": "UTF-8", "text": "from flask import (\n Blueprint, send_from_directory\n)\n\nbp = Blueprint('pwa', __name__, url_prefix='')\n\n\[email protected]('/manifest.json')\ndef manifest():\n return send_from_directory('static', 'manifest.json')\n\n\[email protected]('/sw.js')\ndef service_worker():\n return send_from_directory('static', 'sw.js')\n" } ]
7
Conniemac/cron_jobs
https://github.com/Conniemac/cron_jobs
f8c34e0df8b7356d65d9ec08b08f7b2c5107a1a8
0cf9ed09cf7c8a0b171a3fe89e545e2747347a93
7011b46138680ab480cb42bba22e28dc0a49424d
refs/heads/master
2021-02-18T06:52:25.788314
2020-03-05T14:25:21
2020-03-05T14:25:21
245,172,511
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7108860611915588, "alphanum_fraction": 0.7139240503311157, "avg_line_length": 25.70270347595215, "blob_id": "f0ccd1c039fd1e2baf934df0f7933b0cc5b89c36", "content_id": "6ca2a8c5480c81c76f83390f67cd2d01bb741b77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1975, "license_type": "no_license", "max_line_length": 159, "num_lines": 74, "path": "/python_scripts/check_framework_status.py", "repo_name": "Conniemac/cron_jobs", "src_encoding": "UTF-8", "text": "import io\nimport re\nimport smtplib\nimport ssl\nfrom subprocess import Popen, PIPE\nfrom email.message import EmailMessage\n\n\nsender_email = \"[email protected]\"\nsender_password = \"HisM4k39Vccshwn484jGEvagl\"\nreceiving_emails = [\"[email protected]\"]\n\n\ndef send_email_alert(alert_message: str):\n\n\tssl_port = 465 # For SSL\n\n\t# Create a secure SSL context\n\tcontext = ssl.create_default_context()\n\n\twith smtplib.SMTP_SSL(\"smtp.gmail.com\", ssl_port, context=context) as server:\n\t\tserver.login(sender_email, sender_password)\n\n\t\tfor email in receiving_emails:\n\t\t\tmessage_body = alert_message\n\n\t\t\treceiver_email = email\n\n\t\t\temail_message = EmailMessage()\n\t\t\temail_message.set_content(message_body)\n\t\t\temail_message['Subject'] = f'FATAL'\n\t\t\temail_message['From'] = sender_email\n\t\t\temail_message['To'] = receiver_email\n\n\t\t\tserver.send_message(email_message)\n\n\ndef execute_command(command: list):\n\n\tsubprocess = Popen(command, stdout=PIPE)\n\tresult = subprocess.stdout\n\n\twith io.BufferedReader(result) as result_file:\n\t\trunning_processes = [re.sub(\" +\", \" \", line.decode(\"utf-8\")) for line in result_file]\n\n\treturn running_processes\n\n\ndef main():\n\n\tprocesses_to_find = {\"system_is_up\": {\"process_name\": \"python3 framework_controller.py\", \"email_message\": \"!!!!!Auto-Grow is down. Restart immediately!!!!!\"}}\n\n\t# Get a list of the processes that are currently running\n\tget_current_processes = [\"ps\", \"aux\"]\n\trunning_processes = execute_command(get_current_processes)\n\n\t# Go through each of the running processes and check if framework controller is still running\n\tfor key in processes_to_find.keys():\n\n\t\ttarget_process_is_running = False\n\t\tfor process in running_processes:\n\n\t\t\tif process.find(processes_to_find[key][\"process_name\"]) > -1:\n\t\t\t\ttarget_process_is_running = True\n\t\t\t\tbreak\n\n\t\t# If the process is not running then send an alert\n\t\tif not target_process_is_running:\n\t\t\tprint(\"Process is not running. Sending an email.\")\n\t\t\tsend_email_alert(processes_to_find[key][\"email_message\"])\n\n\nif __name__ == \"__main__\":\n\tmain()" } ]
1
TmTutui/EP-Numerico
https://github.com/TmTutui/EP-Numerico
d9078154c1a9419c3ea9caf7e877a9f9d7e4fb74
8f224065cae20d7bc96c0167a37e385cbbc8a712
e123f03e6a2fc865dacf0be14b457f0f7a56a69c
refs/heads/master
2023-01-08T19:14:28.474823
2020-11-04T15:36:52
2020-11-04T15:36:52
253,892,407
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5133179426193237, "alphanum_fraction": 0.5593533515930176, "avg_line_length": 33.18947219848633, "blob_id": "36ef41caff695309fe3c66ba1f786b988a890fe2", "content_id": "0641dd62d09e5320dae9084f0088b6478e639fb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6526, "license_type": "no_license", "max_line_length": 132, "num_lines": 190, "path": "/Primeira_tarefa/letra_a.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport time\nstart_time = time.time()\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\n\ncurrent_path = os.path.abspath(__file__)\ncurrent_path = current_path.split('/')\ncurrent_path = current_path[:len(current_path) - 1]\ncurrent_path = \"/\".join(current_path)\n \ndef heat_equation(_u0, T, N, _f, lamb, g1, g2, _u):\n \"\"\"\n Heat Equation:\n u0: uo(x) - math function\n N: int (input)\n M: int (input)\n T: float\n i = 1, ..., N-1\n k = 0, ..., M-1\n f: math function - f(t,x)\n u: Heat Equation - u(t, x)\n xi = i∆x, i = 0, · · · , N, com ∆x = 1/N. Para a discretização temporal definimos ∆t = T /M, e\n calculamos aproximações nos instantes tk = k∆t, k = 1, · · · , M. \n A variável u(t, x) descreve a temperatura no instante t na posição x, sendo a distribuição inicial u0(x) dada\n\n return: \n u_old: array\n erro: list\n \"\"\"\n \n print('-'*15+'Heat Equation in progress'+'-'*15+'\\n')\n \n dx = 1/N\n M = int(T*np.power(N, 2)/lamb)\n dt = T/M \n\n # used in u exata\n x_utarget = np.arange(0, 1.0000000001, dx)\n y_utarget = np.array([_u(T, x_utarget[i]) for i in range(len(x_utarget))])\n\n # used in aprox\n u_old = np.array([_u0(x_utarget[i]) for i in range(len(x_utarget))])\n\n # u for every 0.1 units of time\n u_interval = np.array([u_old])\n list_times = [i for i in range(0, M +1 ,M//10)]\n \n for k in tqdm(range(0, M)):\n # adicionar u(k+1,0) na u_new\n u_new = np.array([g1])\n\n for i in range(1, N):\n u_new = np.append(u_new, u_old[i] + dt * ((u_old[i-1] - 2*u_old[i] + u_old[i+1]) / np.power(dx, 2) + _f(k*dt,i*dx) )) \n\n # adicionar u(k+1,N) na u_new\n u_new = np.append(u_new, g2)\n\n u_old = u_new.copy()\n\n if( (k+1) in list_times ):\n u_interval = np.append(u_interval, [u_old], axis = 0)\n \n # calcular o erro\n erro = np.max(abs(y_utarget-u_old))\n \n print('-'*15+'Heat Equation done'+'-'*15+'\\n')\n return u_interval, erro \n \ndef plot(us, _u, erro):\n \"\"\"\n Plot a graph using matplotlib\n us: array with heat_equation values (n=3)\n _u: array - y_utarget\n erro: list of floats\n\n Save figures at figuras_a\n \"\"\" \n import matplotlib.pyplot as plt\n import matplotlib as mpl\n mpl.rcParams['lines.linewidth'] = 0.1\n plt.rcParams[\"figure.figsize\"] = (30,15)\n \n fig, axs = plt.subplots(3,11, gridspec_kw={ 'hspace' : 0.45, 'wspace': 0.47})\n fig.suptitle('Plot para N = ' + str(len(us[0][0])-1))\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n \n x_us = np.arange(0,1.0000000000001,1/(len(us[0][0])-1))\n us_dots = [8 for i in range(len(us[0][0]))] # list of dot sizes\n\n # Valores da solução exata\n x_utarget = np.arange(0,1,0.001)\n y_target = np.array([_u(1, x_utarget[i]) for i in range(len(x_utarget))])\n \n target_dots = [0.1 for i in range(len(y_target))] # list of dot sizes\n \n\n for i in range(11):\n axs[0,i].scatter(x_us, us[0][i], s=us_dots, c='#119822')\n \"\"\" axs[0,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n 
axs[0,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i].yaxis.label.set_color('#119822')\n\n axs[1,i].scatter(x_us, us[1][i], s=us_dots, c='#FF8C00')\n \"\"\" axs[1,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[1,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i + 11].yaxis.label.set_color('#FF8C00')\n\n axs[2,i].scatter(x_us, us[2][i], s=us_dots, c='#EC0B43')\n \"\"\" axs[2,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[2,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i + 22].set(xlabel='tempo = ' + str(i/10))\n axs.flat[i + 22].yaxis.label.set_color('#EC0B43')\n\n\n axs.flat[0].set(ylabel='Lambda = 0.25')\n axs.flat[11].set(ylabel='Lambda = 0.5')\n axs.flat[22].set(ylabel='Lambda = 0.51')\n\n\n axs.flat[10].scatter(x_utarget, y_target, s=target_dots, alpha=0.1)\n axs.flat[21].scatter(x_utarget, y_target, s=target_dots, alpha=0.1)\n axs.flat[32].scatter(x_utarget, y_target, s=target_dots, alpha=0.1)\n\n axs.flat[10].yaxis.set_label_position(\"right\")\n axs.flat[10].yaxis.label.set_color('black')\n axs.flat[10].yaxis.label.set_fontsize(17)\n axs.flat[10].set(ylabel=\"erro(T=1) = \"+str(round(erro[0],10)))\n \n axs.flat[21].yaxis.set_label_position(\"right\")\n axs.flat[21].yaxis.label.set_color('black')\n axs.flat[21].yaxis.label.set_fontsize(17)\n axs.flat[21].set(ylabel=\"erro(T=1) = \"+str(round(erro[1],10)))\n\n axs.flat[32].yaxis.set_label_position(\"right\")\n axs.flat[32].yaxis.label.set_color('black')\n axs.flat[32].yaxis.label.set_fontsize(17)\n axs.flat[32].set(ylabel=\"erro(T=1) = \"+str(round(erro[2],10)))\n\n # save image as png\n if sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):\n fig.savefig(r\"Primeira_tarefa\\figuras_a\\Figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n elif sys.platform.startswith('darwin') or sys.platform.startswith('linux'):\n fig.savefig(current_path + \"/figuras_a\" +\"/Figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n else:\n print('--- AIX: saving fig at current directory ---')\n fig.savefig(\"letra_a_figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n\ndef main():\n T = 1\n \n # condições de fronteira nulas\n g1 = 0\n g2 = 0\n \n try:\n N = int(input(\"Type N: \"))\n except:\n print(\"Wrong type! 
N must be an integer!\")\n N = int(input(\"Type N: \"))\n\n def _f(t, x):\n \"Descrição da fonte de calor ao longo do tempo\"\n return 10*np.cos(10*t) * x**2 * (1-x)**2 - (1 + np.sin(10*t))*(12*x**2 - 12*x + 2)\n \n def _u0(x):\n \"Condição de contorno\"\n return np.power(x, 2) * np.power((1 - x), 2)\n\n #solucao exata que precisamos nos aproximar:\n def _u(t, x):\n \"Target solution\"\n return (1 + np.sin(10*t)) * x**2 * (1 - x)**2 \n \n us = []\n erros = []\n \n for lamb in np.array([0.25 , 0.5 , 0.51]):\n u_olds, erro = heat_equation(_u0, T, N, _f, lamb, g1, g2, _u)\n us.append(u_olds)\n erros.append(erro)\n \n plot(us, _u, erros)\n print(\"--- %s seconds ---\"%round(time.time() - start_time, 4))\n\nmain()" }, { "alpha_fraction": 0.43421053886413574, "alphanum_fraction": 0.5482456088066101, "avg_line_length": 16.461538314819336, "blob_id": "551af4715225ef2a98283da0b0617a421cd46359", "content_id": "0b11310931caad2d3791b2202c31832a97b5f85d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 228, "license_type": "no_license", "max_line_length": 39, "num_lines": 13, "path": "/Primeira_tarefa/delete.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\n\ndef u(x):\n print(math.exp(1-x)*math.cos(5*x)) \n print(np.exp(1-x)*np.cos(5*x))\n\n\n\"\"\" print(max([0,1,2,3,4,5.5,5,6]))\nprint(np.max([0,1,2,3,4,5.5,5,6])) \"\"\"\n\nfor i in range(11):\n u(i/10)\n\n" }, { "alpha_fraction": 0.44327694177627563, "alphanum_fraction": 0.4762243926525116, "avg_line_length": 24.880184173583984, "blob_id": "0b1a77184121f9b740d468be065ba8d1b6930a57", "content_id": "0ee13d3eaf58aaca54adb0dd2749b8449f5937b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5665, "license_type": "no_license", "max_line_length": 117, "num_lines": 217, "path": "/Segunda_tarefa/item_b.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "import time\nstart_time = time.time()\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom Segunda_tarefa.item_a import plot, decompose_A, calculate_x, calculate_y, calculate_z\n\ndef main():\n try:\n N = int(input(\"Type N: \"))\n except:\n print(\"Wrong type! N must be an integer!\")\n N = int(input(\"Type N: \"))\n\n part_a(N)\n part_b(N)\n part_c(N)\n\ndef heat_equation(_u0, T, N, _f, _g1, _g2, _u=None):\n \"\"\"\n Heat Equation:\n u0: uo(x) - math function\n N: int (input)\n M: int (input)\n T: float\n i = 1, ..., N-1\n k = 0, ..., M-1\n f: math function - f(t,x)\n u: Heat Equation - u(t, x)\n xi = i∆x, i = 0, · · · , N, com ∆x = 1/N. Para a discretização temporal definimos ∆t = T /M, e\n calculamos aproximações nos instantes tk = k∆t, k = 1, · · · , M. 
\n A variável u(t, x) descreve a temperatura no instante t na posição x, sendo a distribuição inicial u0(x) dada\n\n return: \n u_old: array\n erro: list\n \"\"\"\n \n print('-'*15+'Heat Equation in progress'+'-'*15+'\\n')\n \n dx = 1/N\n dt = dx\n M = int(T/dt) \n lamb = 1/dx\n\n # used in u exata\n x_utarget = np.arange(0, 1.0000000001, dx)\n if(_u != None):\n y_utarget = np.array([_u(i) for i in x_utarget])\n\n # used in aprox\n u_old = np.array([_u0(i) for i in x_utarget])\n\n # u for every 0.1 units of time\n u_interval = np.array([u_old])\n list_times = [i for i in range(0, M +1 ,M//10)]\n\n # matrix A\n A_diag = np.array([(1+2*lamb) for i in range(N-1)])\n A_sub = np.array([(-lamb) for i in range(N-2)])\n\n diag_D, sub_L = decompose_A(A_diag,A_sub)\n\n # Ax = b ou seja A*u_new[1:N-1] = b\n for k in tqdm(range(0, M)):\n # adicionar u(k+1,0) na u_new\n u_new = np.array([_g1((k+1)*dt)])\n\n # create b \n b = np.array([u_old[1] + dt*_f(dt*(k+1),dx) + lamb*_g1(dt*(k+1))])\n for i in range(2, N-1):\n # it is possible to do everything in a loop cause g1=g2=0\n b = np.append(b, u_old[i] + dt*_f(dt*(k+1),dx*i))\n\n b = np.append(b, u_old[N-1] + dt*_f(dt*(k+1),dx*(N-1)) + lamb*_g2(dt*(k+1)) )\n\n # find x\n y = calculate_y(sub_L,b)\n z = calculate_z(diag_D,y)\n x = calculate_x(sub_L,z)\n\n for x_element in x:\n u_new = np.append(u_new, x_element)\n \n # adicionar u(k+1,N) na u_new\n u_new = np.append(u_new, _g2((k+1)*dt))\n \"\"\" print(u_new) \"\"\"\n \n u_old = u_new.copy()\n\n if( (k+1) in list_times ):\n u_interval = np.append(u_interval, [u_old], axis = 0)\n\n if(_u != None):\n # calcular o erro\n erro = np.max(abs(y_utarget-u_old))\n \n print('-'*15+'Heat Equation done'+'-'*15+'\\n')\n return u_interval, erro\n\n else:\n print('-'*15+'Heat Equation done'+'-'*15+'\\n')\n return u_interval\n\ndef part_a(N):\n T = 1\n \n def _f(t, x):\n \"Descrição da fonte de calor ao longo do tempo\"\n return 10*np.cos(10*t) * x**2 * (1-x)**2 - (1 + np.sin(10*t))*(12*x**2 - 12*x + 2)\n \n def _u0(x):\n \"Condição de contorno\"\n return np.power(x, 2) * np.power((1 - x), 2)\n\n #solucao exata que precisamos nos aproximar:\n def _u(x):\n \"Target solution\"\n return (1 + np.sin(10*1)) * x**2 * (1 - x)**2 \n \n def _g1(t):\n \"Condição de fronteira x = 0.\"\n return 0\n \n def _g2(t):\n \"Condição de fronteira x = 1.\"\n return 0\n \n us = []\n erros = []\n\n \n u_old, erro = heat_equation(_u0, T, N, _f, _g1, _g2, _u)\n us.append(u_old)\n \n erros.append(erro)\n \n plot(us, \"b\", \"A\", _u, erros)\n\ndef part_b(N):\n \n def _u0(x):\n \"Distribuição inicial.\"\n return np.exp(-x)\n \n def _g1(t):\n \"Condição de fronteira x = 0.\"\n return np.exp(t)\n \n def _g2(t):\n \"Condição de fronteira x = 1.\"\n return np.exp(t-1)*np.cos(5*t)\n \n def _f(t, x):\n \"Descrição da fonte de calor ao longo do tempo\"\n return np.exp(t-x)*5*(5*np.power(t,2)*np.cos(5*t*x) - (x + 2*t)*np.sin(5*t*x))\n # return -np.exp(t-x)*(5*x*np.sin(5*t*x) - np.cos(5*t*x) + 10*t*np.sin(5*t*x) + (1-25*t*t)*np.cos(5*t*x) )\n \n T = 1\n \n def _u(x):\n \"Target solution (with t=T=1)\"\n return np.exp(1-x)*np.cos(5*x) \n\n us = []\n erros = []\n \n \n u_old, erro = heat_equation(_u0, T, N, _f, _g1, _g2, _u)\n us.append(u_old)\n \n erros.append(erro)\n \n plot(us, 'b', \"B\", _u, erros)\n\n\ndef part_c(N):\n \n def _u0(x):\n \"Distribuição inicial.\"\n return 0\n \n def _g1(t):\n \"Condição de fronteira x = 0.\"\n return 0\n \n def _g2(t):\n \"Condição de fronteira x = 1.\"\n return 0\n \n def _f(t, x):\n \"Descrição da fonte de calor ao longo 
do tempo, = r(t) * Gh(x) \"\n p = 0.25\n # h = dx\n dx = 1/N\n\n if (p-dx <= x <= p):\n \"gh(x) poderia assumir o valor 1/h em p e variar linearmente de 0 a 1/h no intervalo [p − h, p]\"\n return 10000*(1-2*np.power(t,2)) * ((1/np.power(dx,2))*(x + dx - p))\n\n elif (p < x <= p + dx):\n \"e (gh(x) poderia assumir o valor de 1/h a 0 no intervalo [p, p + h], sendo nula no restante do domínio.\"\n return 10000*(1-2*np.power(t,2)) * ((1/np.power(dx,2))*(-x + dx + p))\n \n else:\n return 0\n \n T = 1\n us = []\n \n u_olds = heat_equation(_u0, T, N, _f, _g1, _g2)\n us.append(u_olds)\n\n \n plot(us, 'b', \"C\")\n\nmain()" }, { "alpha_fraction": 0.4951871633529663, "alphanum_fraction": 0.5005347728729248, "avg_line_length": 24.97222137451172, "blob_id": "4f9dfa67b146129659737fc8143863537895c0e8", "content_id": "319bd5f8aec935ade11ce4521251d78f817bf41c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "no_license", "max_line_length": 62, "num_lines": 36, "path": "/main.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\ndef main():\n \n tarefa = int(input(\"Digite a tarefa (1 ou 2): \"))\n \n if(tarefa == 1):\n alternative = input(\"Type an alternative (a,b ou c):\")\n if(alternative.lower() == \"a\"): \n from Primeira_tarefa import letra_a\n letra_a\n\n elif(alternative.lower() == \"b\"):\n from Primeira_tarefa import letra_b\n letra_b\n \n elif(alternative.lower() == \"c\"):\n from Primeira_tarefa import letra_c\n letra_c\n \n else:\n print(\"Não existe esse item, tente novamente!\")\n\n\n elif(tarefa == 2):\n alternative = input(\"Type an alternative (b ou c):\")\n\n if(alternative.lower() == \"b\"):\n from Segunda_tarefa import item_b\n item_b\n\n elif(alternative.lower() == \"c\"):\n from Segunda_tarefa import item_c\n item_c\n \n\nmain()\n" }, { "alpha_fraction": 0.4863712191581726, "alphanum_fraction": 0.5292045474052429, "avg_line_length": 30.729412078857422, "blob_id": "a28ba6c22be63a9070ebec1b588f36b10edc1af1", "content_id": "52ddfa2df11742475ec9cb9e88c33c509fccc04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5427, "license_type": "no_license", "max_line_length": 131, "num_lines": 170, "path": "/Primeira_tarefa/letra_c.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport time\nstart_time = time.time()\nimport sys\nimport numpy as np\nfrom tqdm import tqdm\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\n\ncurrent_path = os.path.abspath(__file__)\ncurrent_path = current_path.split('/')\ncurrent_path = current_path[:len(current_path) - 1]\ncurrent_path = \"/\".join(current_path)\n\n\ndef heat_equation(T, N, _f, lamb):\n \"\"\"\n Heat Equation:\n u0: int = 0\n N: int (input)\n M: int (input)\n T: float\n i = 1, ..., N-1\n k = 0, ..., M-1\n f: math function - f(t,x)\n xi = i∆x, i = 0, · · · , N, com ∆x = 1/N. Para a discretização temporal definimos ∆t = T /M, e\n calculamos aproximações nos instantes tk = k∆t, k = 1, · · · , M. 
\n A variável u(t, x) descreve a temperatura no instante t na posição x, sendo a distribuição inicial u0(x) dada\n\n return: \n u_old: array\n \"\"\"\n \n print('-'*15+'Heat Equation in progress'+'-'*15+'\\n')\n \n dx = 1/N\n M = int(T*np.power(N, 2)/lamb)\n dt = T/M \n\n # used in aprox, u0 = 0\n u_old = np.array([0 for i in range(N+1)])\n u_new = np.array(np.array([]))\n\n # u for every 0.1 units of time\n u_interval = np.array([u_old])\n list_times = [i for i in range(0, M +1 ,M//10)]\n \n for k in tqdm(range(0, M)):\n\n # adicionar u(k+1,0) = 0 na u_new \n u_new = np.append(u_new, 0)\n\n for i in range(1, N):\n u_new = np.append(u_new, u_old[i] + dt * ((u_old[i-1] - 2*u_old[i] + u_old[i+1]) / np.power(dx, 2) + _f(k*dt,i*dx,dx)))\n \n # adicionar u(k+1,N) = 0 na u_new\n u_new = np.append(u_new, 0)\n \n u_old = u_new.copy()\n u_new = []\n\n if( (k+1) in list_times ):\n u_interval = np.append(u_interval, [u_old], axis = 0)\n\n \n \n print('-'*15+'Heat Equation done'+'-'*15+'\\n')\n return u_interval\n\n\ndef plot(us):\n \"\"\"\n Plot a graph using matplotlib\n us: array with heat_equation values (n=3)\n\n Save figures at figuras_c\n \"\"\"\n import matplotlib.pyplot as plt\n import matplotlib as mpl\n mpl.rcParams['lines.linewidth'] = 0.1\n plt.rcParams[\"figure.figsize\"] = (30,15)\n \n fig, axs = plt.subplots(3,11, gridspec_kw={ 'hspace' : 0.45, 'wspace': 0.47})\n fig.suptitle('Plot para N = ' + str(len(us[0][0])-1))\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n \n x_us = np.arange(0,1.0000000000001,1/(len(us[0][0])-1))\n us_dots = [8 for i in range(len(us[0][0]))] # list of dot sizes\n \n for i in range(11):\n axs[0,i].scatter(x_us, us[0][i], s=us_dots, c='#119822')\n \"\"\" axs[0,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[0,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i].yaxis.label.set_color('#119822')\n\n axs[1,i].scatter(x_us, us[1][i], s=us_dots, c='#FF8C00')\n \"\"\" axs[1,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[1,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i + 11].yaxis.label.set_color('#FF8C00')\n\n axs[2,i].scatter(x_us, us[2][i], s=us_dots, c='#EC0B43')\n \"\"\" axs[2,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[2,i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i + 22].set(xlabel='tempo = ' + str(i/10))\n axs.flat[i + 22].yaxis.label.set_color('#EC0B43')\n\n axs.flat[0].set(ylabel='Lambda = 0.25')\n axs.flat[11].set(ylabel='Lambda = 0.5')\n axs.flat[22].set(ylabel='Lambda = 0.51')\n\n # save image as png\n if sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):\n fig.savefig(r\"Primeira_tarefa\\figuras_c\\Figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n elif sys.platform.startswith('darwin') or sys.platform.startswith('linux'):\n fig.savefig(current_path + \"/figuras_c\" +\"/Figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n else:\n print('--- AIX: saving fig at current directory ---')\n fig.savefig(\"letra_c_figure of n = {}.png\".format(len(us[0][0])-1), dpi=300)\n\ndef main():\n \n def _u0(x):\n \"Distribuição inicial.\"\n return 0\n \n def _g1(t):\n \"Condição de fronteira x = 0.\"\n return 0\n \n def _g2(t):\n \"Condição de fronteira x = 1.\"\n return 0\n \n def _f(t, x, dx):\n \"Descrição da fonte de calor ao longo do tempo, = r(t) * Gh(x) \"\n p = 0.25\n # h = dx\n\n if (p-dx <= x <= p):\n \"gh(x) poderia assumir o valor 1/h em p e variar linearmente de 0 a 1/h no 
intervalo [p − h, p]\"\n return 10000*(1-2*np.power(t,2)) * ((1/np.power(dx,2))*(x + dx - p))\n\n elif (p < x <= p + dx):\n \"e (gh(x) poderia assumir o valor de 1/h a 0 no intervalo [p, p + h], sendo nula no restante do domínio.\"\n return 10000*(1-2*np.power(t,2)) * ((1/np.power(dx,2))*(-x + dx + p))\n \n else:\n return 0\n \n T = 1\n \n try:\n N = int(input(\"Type N: \"))\n except:\n print(\"Wrong type! N must be an integer!\")\n N = int(input(\"Type N: \"))\n \n\n us = []\n \n for lamb in [0.25 , 0.5 , 0.51]:\n u_olds = heat_equation(T, N, _f, lamb)\n us.append(u_olds)\n\n \n plot(us)\n print(\"--- %s seconds ---\"%round(time.time() - start_time, 4))\n\nmain()" }, { "alpha_fraction": 0.5280218720436096, "alphanum_fraction": 0.5662956237792969, "avg_line_length": 28.606935501098633, "blob_id": "64e0e3d7155d5f6894acb99065abefc943367cb6", "content_id": "6d78e1c3633859a8068a16128bcca98c0df18cf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5123, "license_type": "no_license", "max_line_length": 130, "num_lines": 173, "path": "/Segunda_tarefa/item_a.py", "repo_name": "TmTutui/EP-Numerico", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport numpy as np\n\ndef decompose_A(principal,sub):\n \"Funcao que decompoe a matriz A em L e em D (A = LD(L^-1))\"\n # Recebe duas listas: a diagonal principal de A e a subdiagonal de A\n # Retorna duas listas: a diagonal Principal de D e a subdiagonal de L\n\n tamanho = len(principal)\n princ_D = np.array([])\n sub_L = np.array([])\n\n princ_D = np.append(princ_D,principal[0])\n\n\n for i in range(0,tamanho-1):\n l_i = sub[i]/princ_D[i] \n\n sub_L = np.append(sub_L,l_i)\n\n princ_D = np.append(princ_D , principal[i+1]-(princ_D[i]*np.power(l_i,2)))\n\n return princ_D,sub_L\n\n@DeprecationWarning\ndef dot_product(A,B): # work for two square matrices\n \"Receive two square matrices and return a Matrix\"\n matrix = np.array([[0 for i in range(len(A))]])\n for i in range(len(A)-1):\n line = np.array([])\n for j in range(len(A)):\n line = np.append(line,0)\n\n matrix = np.append(matrix, [line], axis=0)\n\n\n for i in range(len(A)): \n for j in range(len(B[0])): \n for k in range(len(B)): \n matrix[i][j] += A[i][k] * B[k][j] \n\n return matrix\n\n@DeprecationWarning\ndef transpose(A): # work for squace matrix\n \"Receive a matrix and return it transposed\"\n A = np.array(A) #make sure A is a np array not a list\n A_t = A.copy()\n\n for i in range(len(A)):\n for j in range(len(A[0])):\n A_t[j][i] = A[i][j]\n \n return A_t\n\ndef calculate_y(sub_L,b):\n \"Receive sub diagonal of L(list) and the column matrix b and return y\"\n # Ly = b\n\n y = np.array([b[0]])\n \n for i in range(len(b)-1):\n y = np.append(y, b[i+1] - sub_L[i]*y[i])\n \n return y\n\ndef calculate_z(diag_D,y):\n \"Receive diagonal of D(list) and the column matrix y and return z\"\n # Dz = y\n\n z = np.array([])\n \n for i in range(len(y)):\n z = np.append(z, y[i]/diag_D[i])\n \n return z\n\ndef calculate_x(sub_L,z):\n \"Receive super diagonal of L transposed(list) and the column matrix z and return x\"\n # L*(transposed)x = z\"\n \n x = np.zeros(len(z))\n x[len(x)-1] = z[len(z)-1]\n \n for i in range(len(z)-2,-1,-1):\n x[i] = z[i] - sub_L[i]*x[i+1]\n \n return x\n\ndef teste4():\n A = [\n [32.5,65,0,0],\n [65,131,5,0],\n [0,5,28,6],\n [0,0,6,16],\n ]\n\n b=[\n 5677.75,\n 11477.3,\n 23791.08,\n 61804.16\n ]\n\n diag_D, sub_L = decompose_A([32.5,131,28,16],[65,5,6])\n # Ly = b, Dz = y e L*x = z\n y = calculate_y(sub_L,b)\n z = 
calculate_z(diag_D,y)\n x = calculate_x(sub_L,z)\n\n print(x)\n\ndef plot(us, letter, part, _u=None, erro=None):\n \"\"\"\n Plot a graph using matplotlib\n us: array with heat_equation values (n=3)\n _u: array - y_utarget\n erro: list of floats\n \"\"\" \n import matplotlib.pyplot as plt\n import matplotlib as mpl\n import sys\n import os\n\n # Save parameters\n current_path = os.path.abspath(__file__)\n current_path = current_path.split('/')\n current_path = current_path[:len(current_path) - 1]\n current_path = \"/\".join(current_path)\n\n # Figure parameters\n mpl.rcParams['lines.linewidth'] = 0.1\n plt.rcParams[\"figure.figsize\"] = (20,2.2)\n \n fig, axs = plt.subplots(1,11, gridspec_kw={ 'hspace' : 1.5, 'wspace': 0.47}, constrained_layout=True)\n fig.suptitle('Plot para N = ' + str(len(us[0][0])-1))\n plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n \n x_us = np.arange(0,1.0000000000001,1/(len(us[0][0])-1))\n us_dots = [8 for i in range(len(us[0][0]))] # list of dot sizes\n\n # Valores da solução exata\n x_utarget = np.arange(0,1,0.001)\n\n if(_u != None):\n y_target = np.array([_u(i) for i in x_utarget])\n \n target_dots = [0.2 for i in range(len(x_utarget))] # list of dot sizes\n \n # Plotting Graph\n for i in range(11):\n axs[i].scatter(x_us, us[0][i], s=us_dots, c='#119822')\n \"\"\" axs[0,i].set_xticks(np.arange(min(x_us), max(x_us)+1, 0.2)) \"\"\"\n axs[i].ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n axs.flat[i].yaxis.label.set_color('#119822')\n\n if(_u != None):\n axs.flat[10].scatter(x_utarget, y_target, s=target_dots, alpha=0.1)\n\n axs.flat[10].yaxis.set_label_position(\"right\")\n axs.flat[10].yaxis.label.set_color('black')\n axs.flat[10].yaxis.label.set_fontsize(9)\n if(erro != None):\n axs.flat[10].set(ylabel=\"erro(T=1) = \"+str(round(erro[0],10)))\n\n # save image as png\n if sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):\n fig.savefig(r\"Segunda_tarefa\\figuras_{}\\Figure of n = {}, parte {}.png\".format(letter,len(us[0][0])-1, part), dpi=300)\n elif sys.platform.startswith('darwin') or sys.platform.startswith('linux'):\n fig.savefig(current_path + \"/figuras_{}\" +\"/Figure of n = {}, parte {}.png\".format(letter,len(us[0][0])-1, part), dpi=300)\n else:\n print('--- AIX: saving fig at current directory ---')\n fig.savefig(\"letra_{}_figure of n = {}, parte {}.png\".format(letter,len(us[0][0])-1, part), dpi=300)" } ]
6
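The Segunda_tarefa/item_a.py helpers in the record above solve the implicit heat-equation step by factoring the symmetric tridiagonal matrix A (main diagonal 1+2*lamb, off-diagonals -lamb, as built in heat_equation) into A = L·D·L^T (decompose_A), then solving L·y = b, D·z = y and L^T·x = z in turn (calculate_y, calculate_z, calculate_x). A minimal self-contained sketch of that same solve follows; the matrix and right-hand-side values are invented for illustration and the variable names are not part of the repository.

# Sketch of the LDL^T tridiagonal solve used in item_a.py (illustrative values only).
import numpy as np

diag = np.array([4.0, 4.0, 4.0, 4.0])   # main diagonal of A
sub  = np.array([-1.0, -1.0, -1.0])     # symmetric sub/super diagonal of A
b    = np.array([1.0, 2.0, 3.0, 4.0])   # right-hand side

# Decompose A = L D L^T, with D diagonal and L unit lower bidiagonal (as in decompose_A).
d = np.empty_like(diag)
l = np.empty_like(sub)
d[0] = diag[0]
for i in range(len(sub)):
    l[i] = sub[i] / d[i]
    d[i + 1] = diag[i + 1] - d[i] * l[i] ** 2

# Forward substitution L y = b (as in calculate_y).
y = np.empty_like(b)
y[0] = b[0]
for i in range(1, len(b)):
    y[i] = b[i] - l[i - 1] * y[i - 1]

# Diagonal solve D z = y (as in calculate_z).
z = y / d

# Back substitution L^T x = z (as in calculate_x).
x = np.empty_like(z)
x[-1] = z[-1]
for i in range(len(z) - 2, -1, -1):
    x[i] = z[i] - l[i] * x[i + 1]

# Check against the dense system.
A = np.diag(diag) + np.diag(sub, -1) + np.diag(sub, 1)
assert np.allclose(A @ x, b)

Storing only the two diagonals (the A_diag and A_sub arrays above) keeps each implicit time step at O(N) work, rather than the O(N^3) cost of assembling and solving the dense matrix.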
xl60-hust/clinical-citation-sentiment
https://github.com/xl60-hust/clinical-citation-sentiment
efd88f1360318c5220895e6aeeb031d999083ad0
734e958cd0475dfe1e4a31b93827a3855ec5868d
e1b29e893d44cedaa2a617dad03cc98adff011f0
refs/heads/master
2023-04-01T15:24:55.971358
2020-09-19T23:34:11
2020-09-19T23:34:11
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6566774845123291, "alphanum_fraction": 0.6586949825286865, "avg_line_length": 27.58368492126465, "blob_id": "9cde993bf921d900c7522713e2ca4137eb7daf26", "content_id": "0b9fe05789737c7692abc0758678c819a1a0018f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 20322, "license_type": "no_license", "max_line_length": 154, "num_lines": 711, "path": "/src/gov/nih/nlm/citationsentiment/ml/Features.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment.ml;\n\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.Collection;\nimport java.util.Comparator;\nimport java.util.HashSet;\nimport java.util.LinkedHashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.Set;\nimport java.util.TreeSet;\nimport java.util.logging.Logger;\n\nimport gov.nih.nlm.citationsentiment.CitationMention;\nimport gov.nih.nlm.citationsentiment.RuleBasedSentiment;\nimport gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.Sentence;\nimport gov.nih.nlm.ling.core.Span;\nimport gov.nih.nlm.ling.core.SpanList;\nimport gov.nih.nlm.ling.core.SurfaceElement;\nimport gov.nih.nlm.ling.core.SynDependency;\nimport gov.nih.nlm.ling.core.Word;\nimport gov.nih.nlm.ling.sem.Entity;\nimport gov.nih.nlm.ling.sem.SemanticItem;\nimport gov.nih.nlm.ling.sem.Term;\nimport gov.nih.nlm.ml.feature.DoubleFeature;\nimport gov.nih.nlm.ml.feature.Feature;\nimport gov.nih.nlm.ml.feature.FeatureSet;\nimport gov.nih.nlm.ml.feature.StringFeature;\nimport gov.nih.nlm.ml.feature.StringSetFeature;\nimport gov.nih.nlm.util.Strings;\nimport liblinear.FeatureNode;\n\npublic class Features<T extends CitationMention> extends FeatureSet<T> {\n\tprivate static Logger log = Logger.getLogger(Features.class.getName());\t\n\t\n\tprivate Map<String,String> usedTerms = null;\n\t\n\tpublic static final Comparator<FeatureNode> FEATURE_NODE_COMPARATOR =\n\t\t\tnew Comparator<FeatureNode>() {\n\t\tpublic int compare(final FeatureNode f1, final FeatureNode f2) {\n\t\t\treturn f1.index - f2.index;\n\t\t}\n\t};\n\t\n\tpublic void setUsedTerms(Map<String,String> terms) {\n\t\tthis.usedTerms = terms;\n\t}\n\n\t/**\n\t * {@inheritDoc}\n\t */\n\t@Override\n\tpublic Set<Feature<T,?>> getFeatures() {\n\t\tfinal Set<Feature<T,?>> features = newSet();\n\t\tfor (final Type type : Type.values()) {\n\t\t\tfor (int i=1; i<= 5; i++) {\n\t\t\t\tfeatures.addAll(getFeatures(type,i));\n\t\t\t}\n\t\t}\n\t\treturn features;\n\t}\n\n\tpublic Set<Feature<T,?>> getFeatures(final Type type, final int window) {\n\t\tfinal Set<Feature<T,?>> features = newSet();\n\t\treturn features;\n\t}\n\n\tpublic enum Type {\n\t\tNORMAL {\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\treturn token.getText();\n\t\t\t}\n\t\t},\n\n\t\tUNCASED {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\treturn token.getText().toLowerCase();\n\t\t\t}\n\t\t},\n\n\t\tSTEMMED {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\treturn token.getLemma().toLowerCase();\n\t\t\t}\n\t\t},\n\n\t\tUNCASED_WORDS {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\tif (token instanceof Word) {\n\t\t\t\t\tfinal String rawString = token.getText().toLowerCase();\n\t\t\t\t\tif (Strings.containsLetter(rawString)) return rawString;\n\t\t\t\t\treturn null;\n\t\t\t\t} else {\n\t\t\t\t\treturn 
token.getText().toLowerCase();\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\t\n\t\tSTEMMED_WORDS {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\tfinal String rawString = token.getLemma().toLowerCase();\n\t\t\t\tif (Strings.containsLetter(rawString)) return rawString;\n\t\t\t\treturn null;\n\t\t\t}\n\t\t},\n\t\t\n\t\tPOS {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\treturn token.getPos();\n\t\t\t}\n\t\t},\n\t\t\n\t\tUNCASED_POS {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\t\treturn token.getText().toLowerCase() + \"_\" + token.getPos();\n\t\t\t}\n\t\t},\n\t\t\n\t\tSTEMMED_POS {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\tfinal String rawString = token.getLemma().toLowerCase();\n\t\t\t\treturn rawString + \"_\" + token.getPos();\n\t\t\t}\n\t\t},\n\t\t\n\t\tCAT {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\treturn token.getCategory();\n\t\t\t}\n\t\t},\n\t\t\n\t\tSTEMMED_CAT {\n\t\t\t@Override\n\t\t\tpublic String convert(final SurfaceElement token) {\n\t\t\t\tfinal String rawString = token.getLemma().toLowerCase();\n\t\t\t\treturn rawString + \"_\" + token.getCategory();\n\t\t\t}\n\t\t};\n\n\t\t/**\n\t\t * Converts the given {@link SurfaceElement} to a <code>String</code>. A\n\t\t * <code>null</code> value indicates the <var>token</var> should not be\n\t\t * included.\n\t\t */\n\t\tpublic abstract String convert(SurfaceElement token);\n\t}\n\t\n\t public enum TokenType {\t\t\n\t\t\tWORD {\n\t\t\t\t@Override\n\t\t\t\tpublic List<SurfaceElement> convert(final Sentence sentence) {\n\t\t\t\t\treturn new ArrayList<SurfaceElement>(sentence.getWords());\n\t\t\t\t}\n\t\t\t},\n\t\t\t\n\t\t\tUNIT {\n\t\t\t\t@Override\n\t\t\t\tpublic List<SurfaceElement> convert(final Sentence sentence) {\n\t\t\t\t\treturn sentence.getSurfaceElements();\n\t\t\t\t}\n\t\t\t};\n\t\t\t\n\t\t\tpublic abstract List<SurfaceElement> convert(Sentence sentence);\n\t\t }\n\t\n\t \n/*\tpublic static void surfaceElementNgrams(final List<SurfaceElement> surfaceElements, final CitationMention cm,\n\t\t\tfinal int n,\n\t\t\tfinal Type type,\n\t\t\tfinal Collection<String> grams) {\n\t\tfor (int i = 0; i <= surfaceElements.size() - n; i++) {\n\t\t\tfinal List<String> words = new ArrayList<String>(n);\n\t\t\tfor (int j = i; j < i + n; j++) {\n\t\t\t\tfinal SurfaceElement token = surfaceElements.get(j);\n\t\t\t\tString cs = getCitationString(cm,token);\n\t\t\t\tif (cs != null) {\n\t\t\t\t\twords.add(cs);\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tfinal String word = type.convert(token);\n\t\t\t\t\tif (word == null) {\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\twords.add(word);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (words.size() == n) {\n\t\t\t\tgrams.add(Strings.join(words, \"__\"));\n\t\t\t}\n\t\t}\n\t}*/\n\t\n/*\tpublic static void ngrams(final List<Word> words, final CitationMention cm,\n\t\t\tfinal int n,\n\t\t\tfinal Type type,\n\t\t\tfinal Collection<String> grams) {\n\t\tfor (int i = 0; i <= words.size() - n; i++) {\n\t\t\tfinal List<String> ngrams = new ArrayList<String>(n);\n\t\t\tfor (int j = i; j < i + n; j++) {\n\t\t\t\tfinal Word token = words.get(j);\n\t\t\t\tString cs = getCitationString(cm,token);\n\t\t\t\tif (cs != null) {\n\t\t\t\t\tngrams.add(cs);\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\t\t\t\tfinal String word = type.convert(token);\n\t\t\t\tif (word == null) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse 
{\n\t\t\t\t\tngrams.add(word);\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (ngrams.size() == n) {\n\t\t\t\tgrams.add(Strings.join(ngrams, \"__\"));\n\t\t\t}\n\t\t}\n\t}*/\n\t\n\tpublic static void stringNgrams(final List<String> words, \n\t\t\tfinal int n,\n\t\t\tfinal Collection<String> grams) {\n\t\tfor (int i = 0; i <= words.size() - n; i++) {\n\t\t\tfinal List<String> ngrams = new ArrayList<String>(n);\n\t\t\tfor (int j = i; j < i + n; j++) {\n\t\t\t\tfinal String word = words.get(j);\n\t\t\t\tif (word == null) {\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t\telse {\n\t\t\t\t\tngrams.add(word);\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (ngrams.size() == n) {\n\t\t\t\tgrams.add(Strings.join(ngrams, \"__\"));\n\t\t\t}\n\t\t}\n\t}\n\t\t\n\tprivate class ContextNGramFeature extends StringSetFeature<T> {\n\t\tprivate final int n;\n\t\tprivate final Type type;\n\t\tprotected ContextNGramFeature(final String name,\n\t\t\t\tfinal int n,\n\t\t\t\tfinal Type type) {\n\t\t\tsuper(name);\n\t\t\tthis.n = n;\n\t\t\tthis.type = type;\n\t\t}\n\t\t@Override\n\t\tpublic Set<String> compute(final T span) {\n\t\t\tfinal List<Span> context = span.getContext();\n\t\t\tfinal Set<String> grams = new TreeSet<String>();\n\t\t\tList<String> tokens = new ArrayList<>();\n\t\t\tfor (Span sp: context) {\n\t\t\t\tList<SurfaceElement> surfs = span.getDocument().getSurfaceElementsInSpan(sp);\n\t\t\t\tfor (SurfaceElement surf: surfs) {\n\t\t\t\t\tif (isCitationMention(surf)) {\n\t\t\t\t\t\tString cs = getCitationString(span,surf);\n\t\t\t\t\t\ttokens.add(cs);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfor (Word w: surf.toWordList()) {\n\t\t\t\t\t\t\ttokens.add(type.convert(w));\n\t\t\t\t\t\t}\n\t\t\t\t\t}\t\t\t\n\t\t\t\t}\n\t\t\t}\n\t\t\tstringNgrams(tokens, n,grams);\n\t\t\treturn grams;\n\t\t}\n\t}\n\t\n\tpublic class NegationNGramFeature extends StringSetFeature<T> {\n\t\tprivate final Type type;\n\t\tprotected NegationNGramFeature(final String name,\n\t\t\t\tfinal Type type) {\n\t\t\tsuper(name);\n\t\t\tthis.type = type;\n\t\t}\n\t\t@Override\n\t\tpublic Set<String> compute(final T cm) {\n\t\t\tList<SurfaceElement> negs = getNegationClues(cm);\n\t\t\tSet<String> strs = new HashSet<>();\n\t\t\tfor (SurfaceElement neg: negs) {\n\t\t\t\tSentence sent = neg.getSentence();\n\t\t\t\tint negind = sent.getSurfaceElements().indexOf(neg);\n\t\t\t\tint total = sent.getSurfaceElements().size();\n\t\t\t\tSurfaceElement next = null; SurfaceElement nextnext = null;\n\t\t\t\tif (negind < total -1) \n\t\t\t\t\tnext = sent.getSurfaceElements().get(negind+1);\n\t\t\t\tif (negind < total -2)\n\t\t\t\t\tnextnext = sent.getSurfaceElements().get(negind+2);\n\t\t\t\tif (next != null) {\n\t\t\t\t\tString cs = getCitationString(cm,next);\n\t\t\t\t\tif (cs != null) {\n\t\t\t\t\t\tstrs.add(\"NOT_\" + cs);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tString c = type.convert(next);\n\t\t\t\t\t\tif (c != null) strs.add(\"NOT_\" + c);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (nextnext != null) {\n\t\t\t\t\tString cs = getCitationString(cm,nextnext);\n\t\t\t\t\tif (cs != null) {\n\t\t\t\t\t\tstrs.add(\"NOT_\" + cs);\n\t\t\t\t\t}\n\t\t\t\t\telse {\n\t\t\t\t\t\tString c = type.convert(nextnext);\n\t\t\t\t\t\tif (c != null) strs.add(\"NOT_\" + c);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn strs;\n\t\t}\n\t}\n\t\n\tprivate class StructureNGramFeature extends StringSetFeature<T> {\n\t\tprivate final int n;\n\t\tprotected StructureNGramFeature(final String name,\n\t\t\t\tfinal int n) {\n\t\t\tsuper(name);\n\t\t\tthis.n = n;\n\t\t}\n\t\t@Override\n\t\tpublic Set<String> compute(final T span) 
{\n\t\t\tfinal List<Span> context = span.getContext();\n\t\t\tfinal Set<String> grams = new TreeSet<String>();\n\t\t\tfor (Span sp: context) {\n\t\t\t\tList<String> sems = getSemanticString(span,sp);\n\t\t\t\tSet<String> ngrams = new TreeSet<>();\n\t\t\t\tstringNgrams(sems,n,ngrams);\n\t\t\t\tgrams.addAll(ngrams);\n\t\t\t}\n\t\t\treturn grams;\n\t\t}\n\t}\n\t\n\tpublic class StructureDirectionFeature extends StringSetFeature<T> {\n\t\tprotected StructureDirectionFeature() {\n\t\t\tsuper(\"StructureDirection\");\n\t\t}\n\t\t@Override\n\t\tpublic Set<String> compute(final T span) {\n\t\t\tList<String> types = Arrays.asList(\"CITINGWORK\",\"[TC]\",\"[OC]\",\"CONTRAST\");\n\t\t\tfinal List<Span> context = span.getContext();\n\t\t\tfinal Set<String> grams = new TreeSet<String>();\n\t\t\tfor (Span sp: context) {\n\t\t\t\tList<String> sems = getSemanticString(span,sp);\n\t\t\t\tif (sems.contains(\"CONTRAST\") == false) continue;\n\t\t\t\tList<Integer> contrastInds = new ArrayList<>();\n\t\t\t\tList<String>subsems = new ArrayList<>();\n\t\t\t\tfor (int i=0; i < sems.size(); i++) {\n\t\t\t\t\tString sem = sems.get(i);\n\t\t\t\t\tif (types.contains(sem)) subsems.add(sem);\n\t\t\t\t\tif (sem.equals(\"CONTRAST\")) contrastInds.add(i);\n\t\t\t\t}\n\t\t\t\tfor (int i=0; i < subsems.size(); i++) {\n\t\t\t\t\tString s = subsems.get(i);\n\t\t\t\t\tif (s.equals(\"CONTRAST\")) continue;\n\t\t\t\t\tif (i < contrastInds.get(0)) grams.add(s + \"_CONTRAST_DIR\");\n\t\t\t\t\telse if (i > contrastInds.get(0) && i < contrastInds.get(contrastInds.size()-1)) {\n\t\t\t\t\t\tgrams.add(\"CONTRAST_\" + s + \"_DIR\"); grams.add(s + \"_CONTRAST_DIR\");\n\t\t\t\t\t} \n\t\t\t\t\telse if (i > contrastInds.get(contrastInds.size()-1)) grams.add(\"CONTRAST_\" + s + \"_DIR\");\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn grams;\n\t\t}\n\t}\n\t\n\tpublic class ContextUnigramFeature extends ContextNGramFeature {\n\t\tpublic ContextUnigramFeature(final Type type) {\n\t\t\tsuper(\"ContextUnigram(\" + type.toString().toLowerCase() + \")\", 1, type);\n\t\t}\n\t}\n\n\tpublic class ContextBigramFeature extends ContextNGramFeature {\n\t\tpublic ContextBigramFeature(final Type type) {\n\t\t\tsuper(\"ContextBigram(\" + type.toString().toLowerCase() + \")\", 2, type);\n\t\t}\n\t}\n\n\tpublic class ContextTrigramFeature extends ContextNGramFeature {\n\t\tpublic ContextTrigramFeature(final Type type) {\n\t\t\tsuper(\"ContextTrigram(\" + type.toString().toLowerCase() + \")\", 3, type);\n\t\t}\n\t}\n\n\tpublic class StructureUnigramFeature extends StructureNGramFeature {\n\t\tpublic StructureUnigramFeature() {\n\t\t\tsuper(\"StructureUnigram\", 1);\n\t\t}\n\t}\n\n\tpublic class StructureBigramFeature extends StructureNGramFeature {\n\t\tpublic StructureBigramFeature() {\n\t\t\tsuper(\"StructureBigram\", 2);\n\t\t}\n\t}\n\n\tpublic class StructureTrigramFeature extends StructureNGramFeature {\n\t\tpublic StructureTrigramFeature() {\n\t\t\tsuper(\"StructureTrigram\", 3);\n\t\t}\n\t}\n\t\n\t public class NegationCountFeature extends DoubleFeature<T> {\n\t\t\tpublic NegationCountFeature() {\n\t\t\t super(\"NegationCount\");\n\t\t\t}\n\n\t\t\t@Override\n\t\t\tpublic Double compute(final CitationMention cm) {\n\t\t\t\tList<SurfaceElement> negs = getNegationClues(cm);\n\t\t\t\tdouble cnt = 0;\n\t\t\t\tif (negs != null) cnt = Double.valueOf(negs.size());\n\t\t\t\treturn cnt;\n\t\t\t}\n\t\t }\n\n\tpublic class PosSentimentFeature extends StringFeature<T> {\n\t\tprotected PosSentimentFeature() {\n\t\t\tsuper(\"PosSentiment\");\n\t\t}\n\t\t@Override\n\t\tpublic 
String compute(final T cm) {\n\t\t\tList<SurfaceElement> posSpans = findDictionaryPhrases(cm,\"POS\");\n\t\t\tList<SurfaceElement> negSpans = findDictionaryPhrases(cm,\"NEG\");\n\t\t\tif (posSpans.size() > 0){\n\t\t\t\tfor (SurfaceElement pos: posSpans){\n\t\t\t\t\tif (inNegationScope(cm.getDocument(),pos) == false) \n\t\t\t\t\t\treturn \"TRUE\";\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (negSpans.size() >0) {\n\t\t\t\tfor (SurfaceElement neg: negSpans){\n\t\t\t\t\tif (inNegationScope(cm.getDocument(),neg))\n\t\t\t\t\t\treturn \"TRUE\";\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"FALSE\";\n\t\t}\n\t}\n\t\n\tpublic class NegSentimentFeature extends StringFeature<T> {\n\t\tprotected NegSentimentFeature() {\n\t\t\tsuper(\"NegSentiment\");\n\t\t}\n\t\t@Override\n\t\tpublic String compute(final T cm) {\n\t\t\tList<SurfaceElement> posSpans = findDictionaryPhrases(cm,\"POS\");\n\t\t\tList<SurfaceElement> negSpans = findDictionaryPhrases(cm,\"NEG\");\n\t\t\tif (negSpans.size() > 0){\n\t\t\t\tfor (SurfaceElement neg: negSpans){\n\t\t\t\t\tif (inNegationScope(cm.getDocument(),neg) == false) \n\t\t\t\t\t\treturn \"TRUE\";\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (posSpans.size() >0) {\n\t\t\t\tfor (SurfaceElement pos: posSpans){\n\t\t\t\t\tif (inNegationScope(cm.getDocument(),pos)) \n\t\t\t\t\t\treturn \"TRUE\";\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn \"FALSE\";\n\t\t}\n\t}\n\t\n\t\n\tpublic class AnySentimentFeature extends StringFeature<T> {\n\t\tprotected AnySentimentFeature() {\n\t\t\tsuper(\"AnySentiment\");\n\t\t}\n\t\t@Override\n\t\tpublic String compute(final T cm) {\n\t\t\tList<SurfaceElement> posSpans = findDictionaryPhrases(cm,\"POS\");\n\t\t\tList<SurfaceElement> negSpans = findDictionaryPhrases(cm,\"NEG\");\n\n\t\t\tif (negSpans.size() > 0 || posSpans.size() > 0){\n\t\t\t\treturn \"TRUE\";\n\t\t\t}\n\t\t\treturn \"FALSE\";\n\t\t}\n\t}\n\t\n\t\n\tpublic class RuleBasedOutputFeature extends StringFeature<T> {\n\t\tProperties props = null;\n\t\tprotected RuleBasedOutputFeature(Properties props) {\n\t\t\tsuper(\"RuleBasedOutput\");\n\t\t\tthis.props = props;\n\t\t}\n\t\t@Override\n\t\tpublic String compute(final T cm) {\n\t\t\t// this assumes the document with the citation mention has been preprocessed\n\t\t\tRuleBasedSentiment ruleBased = RuleBasedSentiment.getInstance(props);\n\t\t\tString result = ruleBased.processMention(cm);\n\t\t\tString[] scoreStrs = result.split(\"[\\\\|]\");\n\t\t\tString predict = scoreStrs[3];\n\t\t\treturn predict;\n\t\t}\n\t}\n\t\n\tpublic class RuleBasedOutputFromFileFeature extends StringFeature<T> {\n\t\tMap<String,String> results;\n\t\tprotected RuleBasedOutputFromFileFeature(Map<String,String> res) {\n\t\t\tsuper(\"RuleBasedOutput\");\n\t\t\tthis.results = res;\n\t\t}\n\t\t@Override\n\t\tpublic String compute(final T cm) {\n\t\t\tString id = cm.getDocument().getId() + \"_\" + cm.getId();\n\t\t\tString ruleOut =results.get(id);\n/*\t\t\tif (ruleOut == null) {\n\t\t\t\truleOut = cm.getMetaData(\"goldSentiment\");\n\t\t\t}*/\n\t\t\treturn ruleOut;\n\t\t}\n\t}\n\t\n\tpublic class ContextDependenciesFeature extends StringSetFeature<T> {\n\t\tprivate final Type type;\n\t\tprotected ContextDependenciesFeature(\n\t\t\t\tfinal Type type) {\n\t\t\tsuper(\"ContextDependencies\");\n\t\t\tthis.type = type;\n\t\t}\n\t\t@Override\n\t\tpublic Set<String> compute(final T span) {\n\t\t\tfinal List<Span> context = span.getContext();\n\t\t\tList<SynDependency> deps = getContextDependencies(span.getDocument(),context);\n\t\t\tfinal Set<String> depStrs = new TreeSet<String>();\n\t\t\tfor (SynDependency d: deps) 
{\n\t\t\t\t SurfaceElement gov =d.getGovernor();\n\t\t\t\t SurfaceElement dep =d.getDependent();\n\t\t\t\t String govstr = \"\";\n\t\t\t\t\tif (isCitationMention(gov)) {\n\t\t\t\t\t\tgovstr = getCitationString(span,gov);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tgovstr = type.convert(gov);\n\t\t\t\t\t}\t\n\t\t\t\t\tString depstr = \"\";\n\t\t\t\t\tif (isCitationMention(dep)) {\n\t\t\t\t\t\tdepstr = getCitationString(span,dep);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tdepstr = type.convert(dep);\n\t\t\t\t\t}\t\n\t\t\t\t\tdepStrs.add(d.getType() + \"_\" + govstr + \"_\" + depstr);\n\t\t\t}\n\t\t\treturn depStrs;\n\t\t}\n\t}\n\t\t\n\tprivate List<SurfaceElement> findDictionaryPhrases(CitationMention cm, String type) {\n\t\tList<Span> context = cm.getContext();\n\t\tDocument doc = cm.getDocument();\n\t\tList<SurfaceElement> surfs = new ArrayList<>();\n\t\tLinkedHashSet<SemanticItem> phraseSems = Document.getSemanticItemsByClassTypeSpan(doc, Entity.class, Arrays.asList(type),new SpanList(context), false);\n\t\tfor (SemanticItem sem: phraseSems) {\n\t\t\tEntity ent = (Entity)sem;\n\t\t\t String text = ent.getConcepts().iterator().next().getName();\n\t\t\tif (usedTerms == null || usedTerms.containsKey(text))\n\t\t\t\tsurfs.add(ent.getSurfaceElement());\n\t\t}\n\t\treturn surfs;\n\t}\n\t\n\tprivate List<SurfaceElement> getNegationClues(CitationMention cm) {\n\t\tList<SurfaceElement> negs = findDictionaryPhrases(cm,\"NEGATION\");\n\t\treturn negs;\n\t}\n\t\n\tprivate boolean inNegationScope(Document doc,SurfaceElement surf) {\n\t\tSentence s = surf.getSentence();\n\t\tint ind = s.getSurfaceElements().indexOf(surf);\n\t\tif (ind < 1) return false;\n\t\tSurfaceElement prev = s.getSurfaceElements().get(ind-1);\n\t\tif (isNegation(prev)) return true;\n\t\tif (ind >1) {\n\t\t\tSurfaceElement prevprev = s.getSurfaceElements().get(ind-2);\n\t\t\tif (isNegation(prevprev)) return true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\tprivate boolean isNegation(SurfaceElement surf) {\n\t\tLinkedHashSet<SemanticItem> cits = surf.filterSemanticsByClass(Term.class);\n\t\tif (cits == null || cits.size() == 0) return false;\n\t\tfor (SemanticItem s: cits) {\n\t\t\tif (s.getType().equals(\"NEGATION\")) return true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\tprivate List<String> getSemanticString(CitationMention cm, Span context) {\n\t\tList<String> sems = new ArrayList<>();\n\t\tList<SurfaceElement> surfs = cm.getDocument().getSurfaceElementsInSpan(context);\n\t\tfor (SurfaceElement surf: surfs) {\n\t\t\tLinkedHashSet<SemanticItem> es= surf.filterByEntities();\n\t\t\tLinkedHashSet<SemanticItem> nes = new LinkedHashSet<>();\n\t\t\tif (usedTerms != null) {\n\t\t\t\tfor (SemanticItem se: es) {\n\t\t\t\t\tEntity ee = (Entity)se;\n\t\t\t\t\t String text = ee.getConcepts().iterator().next().getName();\n\t\t\t\t\t if (usedTerms.containsKey(text)) {\n\t\t\t\t\t\t nes.add(se);\n\t\t\t\t\t }\n\t\t\t\t}\n\t\t\t} else \n\t\t\t\tnes.addAll(es);\n\t\t\tString s = getCitationString(cm,surf);\n\t\t\tif (s != null) {\n\t\t\t\tsems.add(s);\n\t\t\t\tcontinue;\n\t\t\t} \n\t\t\tif (nes == null || nes.size() ==0) continue;\n//\t\t\tif (isUMLSConcept(surf)) continue;\n\t\t\tEntity ent = (Entity)nes.iterator().next();\n\t\t\tsems.add(ent.getType());\n\t\t\t\n\t\t}\n\t\treturn sems;\n\t}\n\t\n\tprivate boolean isCitationMention(SurfaceElement surf) {\n\t\tLinkedHashSet<SemanticItem> cits = surf.filterSemanticsByClass(CitationMention.class);\n\t\treturn (cits != null && cits.size() > 0);\n\t}\n\t\n\tprivate String getCitationString(CitationMention cm, SurfaceElement 
token) {\n\t\tif (cm.getSurfaceElement().equals(token)) return \"[TC]\";\n\t\telse if (isCitationMention(token)) return \"[OC]\";\n\t\treturn null;\n\t}\n\t\n\t private List<Span> getSubsumedSubjectMatter(CitationMention cm, Document doc) {\n\t\t List<Span> sps = new ArrayList<>();\n\t\t List<Span> context = cm.getContext();\n\t\t if (context == null || context.size() == 0) return sps;\n\t\t List<Span> sms = getSubjectMatterSpans(doc);\n\t\t for (Span sp: context) {\n\t\t\t for (Span sm: sms) {\n\t\t\t\t if (Span.subsume(sp, sm)) {\n\t\t\t\t\t sps.add(sm);\n\t\t\t\t }\n\t\t\t }\n\t\t }\n\t\t return sps;\n\t }\n\t \n\t private List<Span> getSubjectMatterSpans(Document doc) {\n\t\t List<Span> sps = new ArrayList<Span>();\n\t\t LinkedHashSet<SemanticItem> cits = Document.getSemanticItemsByClass(doc, CitationMention.class);\n\t\t for (SemanticItem sem: cits) {\n\t\t\t CitationMention cit = (CitationMention)sem;\n\t\t\t List<Span> sm = cit.getSubjectMatter();\n\t\t\t if (sm == null) continue;\n\t\t\t sps.addAll(sm);\n\t\t }\n\t\t return sps;\n\t }\n\t \n/*\t private boolean inSubjectMatter(SurfaceElement s, List<Span> spans) {\n\t\t if (spans == null) return false;\n\t\t for (Span sp: spans) {\n\t\t\t if (Span.subsume(sp,s.getSpan().asSingleSpan()) ) return true;\n\t\t }\n\t\t return false;\n\t }\n\t \n\t\tprivate boolean otherSubjectMatter(SurfaceElement s, List<Span> sms) {\n\t\t\tif (sms == null) return false;\n\t\t\tfor (Span sp: sms) {\n\t\t\t\tif (Span.subsume(sp, s.getSpan().asSingleSpan())) return true;\n\t\t\t}\n\t\t\treturn false;\n\t\t}*/\n\n\t private List<SynDependency> getContextDependencies(Document doc, List<Span> context) {\n\t\t List<SynDependency> spDeps = new ArrayList<>();\n\t\t for (Span sp: context) {\n\t\t\t List<Sentence> sents = doc.getAllSubsumingSentences(sp);\n\t\t\t for (Sentence sent: sents) {\n\t\t\t\t List<SynDependency> sds = sent.getEmbeddings();\n\t\t\t\t for (SynDependency sd: sds) {\n\t\t\t\t\t SurfaceElement gov = sd.getGovernor();\n\t\t\t\t\t SurfaceElement dep = sd.getDependent();\n\t\t\t\t\t if (Span.subsume(sp,gov.getSpan().asSingleSpan()) && Span.subsume(sp, dep.getSpan().asSingleSpan())) {\n\t\t\t\t\t\t spDeps.add(sd);\n\t\t\t\t\t }\n\t\t\t\t }\n\t\t\t }\n\t\t }\n\t\t return spDeps;\n\t }\n\t\n}" }, { "alpha_fraction": 0.6937267184257507, "alphanum_fraction": 0.6959493160247803, "avg_line_length": 36.03086471557617, "blob_id": "289f276e7488e1179e74bcc8f3b37c0928747079", "content_id": "d05efd6b3c99159b450cfceb9368e9747c123061", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 17997, "license_type": "no_license", "max_line_length": 148, "num_lines": 486, "path": "/src/gov/nih/nlm/citationsentiment/Utils.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.Collections;\nimport java.util.HashMap;\nimport java.util.LinkedHashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.logging.Logger;\n\nimport gov.nih.nlm.ling.core.ContiguousLexeme;\nimport gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.MultiWordLexeme;\nimport gov.nih.nlm.ling.core.Sentence;\nimport gov.nih.nlm.ling.core.Span;\nimport gov.nih.nlm.ling.core.SpanList;\nimport gov.nih.nlm.ling.core.SurfaceElement;\nimport gov.nih.nlm.ling.core.WordLexeme;\nimport 
gov.nih.nlm.ling.io.XMLReader;\nimport gov.nih.nlm.ling.process.ComponentLoader;\nimport gov.nih.nlm.ling.process.IndicatorAnnotator;\nimport gov.nih.nlm.ling.process.TermAnnotator;\nimport gov.nih.nlm.ling.sem.Entity;\nimport gov.nih.nlm.ling.sem.Indicator;\nimport gov.nih.nlm.ling.sem.Predicate;\nimport gov.nih.nlm.ling.sem.SemanticItem;\nimport gov.nih.nlm.ling.sem.Sense;\nimport gov.nih.nlm.ling.sem.Term;\nimport gov.nih.nlm.ling.util.FileUtils;\n\n\n/** \n * Utility methods to use in processing citation mentions.\n * \n * @author Halil Kilicoglu\n *\n */\npublic class Utils {\n\tprivate static Logger log = Logger.getLogger(Utils.class.getName());\n\t\n\tpublic static Map<Class<? extends SemanticItem>,List<String>> getAnnotationTypes() {\n\t\tMap<Class<? extends SemanticItem>,List<String>> annTypes = new HashMap<Class<? extends SemanticItem>,List<String>>();\n\t\tannTypes.put(CitationMention.class,Arrays.asList(\"CitationMention\"));\n\t\treturn annTypes;\n\t}\n\t\n\tpublic static XMLReader getXMLReader() {\n\t\tXMLReader reader = new XMLReader();\n\t\treader.addAnnotationReader(CitationMention.class, new XMLCitationMentionReader());\n\t\treturn reader;\n\t}\n\t\n/*\tpublic static Map<Class<? extends SemanticItem>,List<String>> getAnnotationTypesMML() {\n\t\tMap<Class<? extends SemanticItem>,List<String>> annTypes = new HashMap<Class<? extends SemanticItem>,List<String>>();\n\t\tannTypes.put(CitationMention.class,Constants.ANNOTATION_TYPES);\n\t\tannTypes.put(Entity.class, Constants.SEMREP_ENTITY_ABRRVS);\n\t\treturn annTypes;\n\t}*/\n\t\n/*\tpublic static XMLReader getXMLReaderMML() {\n\t\tXMLReader reader = new XMLReader();\n\t\treader.addAnnotationReader(CitationMention.class, new XMLCitationMentionReader());\n\t\treader.addAnnotationReader(Entity.class,new XMLEntityReader());\n\t\treturn reader;\n\t}*/\n\t\n\tpublic static Map<String,Map<Integer,List<Integer>>> readFolds(String dir) throws IOException {\n\t\tMap<String,Map<Integer,List<Integer>>> folds = new HashMap<>();\n\t\tList<String> files = FileUtils.listFiles(dir, false, \"txt\");\n\t\tCollections.sort(files);\n\t\tfor (String f: files) {\n\t\t\tif (f.contains(\"test\") == false && f.contains( \"train\") == false) continue;\n\t\t\tString ff = f.substring(f.lastIndexOf(File.separator)+1);\n\n\t\t\tboolean test = ff.startsWith(\"test\");\n\t\t\tint fold = (test? Integer.parseInt(ff.substring(4,5)) : Integer.parseInt(ff.substring(5,6)));\n\t\t\tSystem.out.println(\"FILE: \" + f + \"|\" + ff + \"|\" + fold + \"|\" + (test ? 
\"test\" : \"train\"));\n\t\t\tList<String> lines = FileUtils.linesFromFile(f, \"UTF-8\");\n\t\t\tList<Integer> allIds = new ArrayList<>();\n\t\t\tfor (String l: lines) {\n\t\t\t\tString[] els = l.split(\"[\\\\t]\");\n\t\t\t\tint id = Integer.parseInt(els[0]);\n\t\t\t\tallIds.add(id);\n\t\t\t}\n\t\t\tif (test) {\n\t\t\t\tMap<Integer,List<Integer>> testFolds = folds.get(\"test\");\n\t\t\t\tif (testFolds == null) {\n\t\t\t\t\ttestFolds = new HashMap<>();\n\t\t\t\t}\n\t\t\t\ttestFolds.put(fold, allIds);\n\t\t\t\tfolds.put(\"test\", testFolds);\n\t\t\t}\n\t\t\telse {\n\t\t\t\tMap<Integer,List<Integer>> trainFolds = folds.get(\"train\");\n\t\t\t\tif (trainFolds == null) {\n\t\t\t\t\ttrainFolds = new HashMap<>();\n\t\t\t\t}\n\t\t\t\ttrainFolds.put(fold, allIds);\n\t\t\t\tfolds.put(\"train\", trainFolds);\n\t\t\t}\n\t\t}\n\t\treturn folds;\n\t}\n\t\n\t/**\n\t * Annotates a given document with a set of provided indicators (triggers) based on lemmas.\n\t * {@link gov.nlm.nih.gov.process.IndicatorAnnotator} class is used. \n\t * \n\t * @param doc\t\tDocument to annotate\n\t * @param indicators\tSet of indicators specified with their lemmas\n\t * @param props\tAdditional properties to pass to the annotator\n\t */\n\tpublic static void annotateIndicators(Document doc, LinkedHashSet<Indicator> indicators, Properties props) {\n\t\tprops.setProperty(\"termAnnotators\",\"gov.nih.nlm.ling.process.IndicatorAnnotator\");\n//\t\tprops.put(\"ignorePOSforIndicators\", \"true\");\n\t\ttry {\n\t\t\tList<TermAnnotator> termAnnotators = ComponentLoader.getTermAnnotators(props);\n\t\t\tfor (TermAnnotator annotator : termAnnotators) {\n\t\t\t\tif (annotator instanceof IndicatorAnnotator) {\n\t\t\t\t\t((IndicatorAnnotator)annotator).setIndicators(indicators);\n\t\t\t\t\tString ignore = props.getProperty(\"ignorePOSforIndicators\");\n\t\t\t\t\tboolean ignorePOS = Boolean.parseBoolean(ignore == null ? \"false\" : ignore);\n\t\t\t\t\t((IndicatorAnnotator)annotator).setIgnorePOS(ignorePOS);\n\t\t\t\t\t((IndicatorAnnotator)annotator).annotateIndicators(doc, props);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (Exception ie) {\n\t\t\tlog.severe(\"Unable to instantiate the trigger annotator.\");\n\t\t\tie.printStackTrace();\n\t\t}\n\t}\n\t\n\t/**\n\t * Annotates a given document with a set of strings from a dictionary.\n\t * {@link StringTermAnnotator} is used. 
\n\t * \n\t * @param doc\t\tDocument to annotate\n\t * @param dictionaryItems\t\tDictionary of strings to annotate with\n\t * @param props\tAdditional properties to pass to the annotator\n\t */\n\tpublic static void annotateStrings(Document doc, Map<String,String>dictionaryItems, Properties props) {\n\t\tprops.setProperty(\"termAnnotators\",\"gov.nih.nlm.citationsentiment.StringTermAnnotator\");\n\t\ttry {\n\t\tList<TermAnnotator> termAnnotators = ComponentLoader.getTermAnnotators(props);\n\t\t\tfor (TermAnnotator annotator : termAnnotators) {\n\t\t\t\tif (annotator instanceof StringTermAnnotator) {\n\t\t\t\t\t((StringTermAnnotator)annotator).setDictionaryItems(dictionaryItems);\n\t\t\t\t\t((StringTermAnnotator)annotator).annotateTerms(doc,props);\n\t\t\t\t}\n\t\t\t}\n\t\t} catch (Exception ie) {\n\t\t\tlog.severe(\"Unable to instantiate string annotator.\");\n\t\t\tie.printStackTrace();\n\t\t}\n\t}\n\n\t/**\n\t * Removes predicates subsumed by other terms from the document.\n\t * \n\t * @param doc\tDocument to process\n\t */\n\tpublic static void removeSubsumedPredicates(Document doc) {\n\t\tList<SemanticItem> toRemove = new ArrayList<SemanticItem>();\n\t\tLinkedHashSet<SemanticItem> terms = Document.getSemanticItemsByClass(doc, Predicate.class);\n\t\tfor (SemanticItem term : terms) {\n\t\t\tPredicate t = (Predicate)term;\n//\t\t\tIndicator ind = t.getIndicator();\n\t\t\tSurfaceElement su = t.getSurfaceElement();\n\t\t\tLinkedHashSet<SemanticItem> suTerms = su.filterByPredicates();\n\t\t\tfor (SemanticItem suT: suTerms) {\n\t\t\t\tif (suT.equals(t) || toRemove.contains(suT)) continue;\n//\t\t\t\tif (indicators.contains(ind) == false) continue;\n\t\t\t\tif (SpanList.subsume(suT.getSpan(), t.getSpan()) && suT.getSpan().length() > t.getSpan().length()) {\n\t\t\t\t\ttoRemove.add(t);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor (SemanticItem rem: toRemove) {\n\t\t\tlog.finest(\"Removing subsumed predicate \" + rem.toShortString());\n\t\t\tdoc.removeSemanticItem(rem);\n\t\t}\n\t}\n\t\n\t/**\n\t * Removes all terms subsumed by others from the given document.\n\t * \n\t * @param doc\tDocument to process\n\t */\n\tpublic static void removeSubsumedTerms(Document doc) {\n\t\tList<SemanticItem> toRemove = new ArrayList<SemanticItem>();\n\t\tLinkedHashSet<SemanticItem> terms = Document.getSemanticItemsByClass(doc, Term.class);\n\t\tfor (SemanticItem term : terms) {\n\t\t\tTerm t = (Term)term;\n\t\t\tSurfaceElement su = t.getSurfaceElement();\n\t\t\tLinkedHashSet<SemanticItem> suTerms = su.filterByTerms();\n\t\t\tfor (SemanticItem suT: suTerms) {\n\t\t\t\tif (suT.equals(t) || toRemove.contains(suT)) continue;\n//\t\t\t\tif (indicators.contains(ind) == false) continue;\n\t\t\t\tif (SpanList.subsume(suT.getSpan(), t.getSpan()) && suT.getSpan().length() > t.getSpan().length()) {\n\t\t\t\t\ttoRemove.add(t);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor (SemanticItem rem: toRemove) {\n\t\t\tlog.finest(\"Removing subsumed term \" + rem.toShortString());\n\t\t\tdoc.removeSemanticItem(rem);\n\t\t}\n\t}\n\t\n\t/**\n\t * Removes all entities subsumed by other terms from the given document.\n\t * \n\t * @param doc\tDocument to process\n\t */\n\tpublic static void removeSubsumedEntities(Document doc) {\n\t\tList<SemanticItem> toRemove = new ArrayList<SemanticItem>();\n\t\tLinkedHashSet<SemanticItem> terms = Document.getSemanticItemsByClass(doc, Entity.class);\n\t\tfor (SemanticItem term : terms) {\n\t\t\tEntity t = (Entity)term;\n\t\t\tSurfaceElement su = 
t.getSurfaceElement();\n\t\t\tLinkedHashSet<SemanticItem> suTerms = su.filterByEntities();\n\t\t\tfor (SemanticItem suT: suTerms) {\n\t\t\t\tif (suT.equals(t) || toRemove.contains(suT)) continue;\n//\t\t\t\tif (indicators.contains(ind) == false) continue;\n\t\t\t\tif (SpanList.subsume(suT.getSpan(), t.getSpan()) && suT.getSpan().length() > t.getSpan().length()) {\n\t\t\t\t\ttoRemove.add(t);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor (SemanticItem rem: toRemove) {\n\t\t\tlog.finest(\"Removing subsumed entity \" + rem.toShortString());\n\t\t\tdoc.removeSemanticItem(rem);\n\t\t}\n\t}\n\t\n\t/**\n\t * Returns whether a given sentence starts with a contrastive marker, such as <i>although</i>, <i>however</i>.\n\t * The sentence is expected to have been annotated with all relevant terms already (CONTRAST type is relevant \n\t * in this case).\n\t * \n\t * @param sentence\t\tSentence to assess\n\t */\n\tpublic static boolean sentenceInitialContrastive(Sentence sentence) {\n\t\tif (sentence.getSurfaceElements() == null) return false;\n\t\tSurfaceElement f = sentence.getSurfaceElements().get(0);\n\t\tLinkedHashSet<SemanticItem> es = f.filterByEntities();\n\t\tif (es == null || es.size() ==0) return false;\n\t\tfor (SemanticItem e: es) {\n\t\t\tif (e.getType().equals(\"CONTRAST\")) return true;\n\t\t}\n\t\treturn false;\n\t}\n\t\n\t/**\n\t * Returns the context for a given citation mention. Currently, it is either the current sentence only \n\t * or the current sentence plus the next, unless the next sentence starts with a contrastive marker.\n\t * \n\t * @param mention\t\tMention in question\n\t * @param currentSentenceOnly\tWhether to limit to the current sentence\n\t * @return\t\tA list of character offsets, which collectively define the context.\n\t */\n\t public static List<Span> getCitationContext(CitationMention mention, boolean currentSentenceOnly) {\n\t\t List<Span> context = new ArrayList<>();\n\t\t Sentence sent = mention.getSurfaceElement().getSentence();\n\t\t Document doc = mention.getDocument();\n\t\t context.add(sent.getSpan());\n\t\t if (currentSentenceOnly) return context;\n\t\t int ind = doc.getSentences().indexOf(sent);\n\t\t if (ind == doc.getSentences().size() -1) return context;\n\t\t Sentence next = doc.getSentences().get(ind+1);\n\t\t Span nextSpan = next.getSpan();\n\t\t LinkedHashSet<SemanticItem> nextMentions = Document.getSemanticItemsByClassSpan(doc, CitationMention.class, new SpanList(nextSpan), false);\n\t\t if (nextMentions == null || nextMentions.size() == 0) {\n\t\t\t if (sentenceInitialContrastive(next)) {\n\t\t\t\t context.add(nextSpan);\n\t\t\t }\n\t\t } \n\t\t return context;\n\t }\n \n\t /**\n\t * Returns clause level contexts, if relevant. If the mention is the only one in a sentence, \n\t * the context is simply the sentence. If not, a mention's context is the span between the previous \n\t * citation mention and the current mention. 
\n\t * \n\t * \n\t * @param m\n\t * @return\n\t */\n\t public static List<Span> getCitationContextSubSentential(CitationMention m) {\n\t\t List<Span> context = new ArrayList<>();\n\t\t SurfaceElement surf = m.getSurfaceElement();\n\t\t Sentence sent =surf.getSentence();\n\t\t Document doc = m.getDocument();\n\t\t Span u = new Span(sent.getSpan().getBegin(),m.getSpan().getBegin());\n\t\t LinkedHashSet<SemanticItem> prevMentions = Document.getSemanticItemsByClassSpan(doc, CitationMention.class, new SpanList(u), false);\n\t\t LinkedHashSet<SemanticItem> allMentions = Document.getSemanticItemsByClassSpan(doc, CitationMention.class, new SpanList(sent.getSpan()), false);\n\t\t LinkedHashSet<SemanticItem> sameMention = Document.getSemanticItemsByClassSpan(doc, CitationMention.class, m.getSpan(), false);\n\t\t // only citation in the sentence\n\t\t if (allMentions.size() == 1 || (sameMention.size() == allMentions.size())) {\n\t\t\t context.add(sent.getSpan());\n\t\t\t return context;\n\t\t }\n\t\t // span up to the mention only\n\t\t if (prevMentions.size() == 0) {\n\t\t\t context.add(new Span(sent.getSpan().getBegin(),m.getSpan().getBegin()));\n\t\t }\n\t\t // span from the previous mention up to the current mention only\n\t\t else {\n\t\t\t SpanList closest = null;\n\t\t\t for (SemanticItem mm: prevMentions) {\n\t\t\t\t if (closest == null || SpanList.atLeft(closest,mm.getSpan())) closest = mm.getSpan();\n\t\t\t }\n\t\t\t context.add(new Span(closest.getEnd(),m.getSpan().getBegin()));\n\t\t }\n\t\t int ind = doc.getSentences().indexOf(sent);\n\t\t if (ind == doc.getSentences().size() -1) return context;\n\t\t Sentence next = doc.getSentences().get(ind+1);\n\t\t Span nextSpan = next.getSpan();\n\t\t LinkedHashSet<SemanticItem> nextMentions = Document.getSemanticItemsByClassSpan(doc, CitationMention.class, new SpanList(nextSpan), false);\n\t\t if (nextMentions == null || nextMentions.size() == 0) {\n\t\t\t if (sentenceInitialContrastive(next)) {\n\t\t\t\t context.add(nextSpan);\n\t\t\t }\n\t\t } \n\t\t return context; \n\t }\n\t \n\t /**\n\t * Loads a dictionary of sentiment terms (lemma-based).\n\t * \n\t * @param filename\tThe dictionary file\n\t * @return\tA set of indicators \n\t * @throws IOException\n\t */\n\tpublic static LinkedHashSet<Indicator> loadSentimentDictionary(String filename) throws IOException {\n\t\t\t LinkedHashSet<Indicator>dictionary = new LinkedHashSet<>();\n\t\tfinal List<String> lines = FileUtils.linesFromFile(filename, \"UTF8\");\n\t\tfor (String l: lines) {\n//\t\t\tSystem.out.println(\"LINE:\" + l);\n\t\t\tif (l.startsWith(\"#\")) continue;\n\t\t\tString[] els = l.split(\"\\t\");\n\t\t\tString text = els[0];\n\t\t\tString pos = els[1];\n\t\t\tString cat = els[2];\n\t\t\tString[] textsubs = text.split(\"[ ]+\");\n\t\t\tString[] possubs = pos.split(\"[ ]+\");\n\t\t\tList<WordLexeme> lexs = new ArrayList<>();\n\t\t\tfor (int i=0; i < textsubs.length; i++) {\n\t\t\t\tlexs.add(new WordLexeme(textsubs[i],possubs[i]));\n\t\t\t}\n\t\t\tList<ContiguousLexeme> indlexs = new ArrayList<>();\n\t\t\tindlexs.add(new MultiWordLexeme(lexs));\n\t\t\t\n\t\t\tIndicator ind = new Indicator(text,indlexs,true,Arrays.asList(new Sense(cat)));\n\t\t\tdictionary.add(ind);\n\t\t}\n\t\treturn dictionary;\n\t}\n\t\n\t/**\n\t * Loads a simple term dictionary.\n\t * \n\t * @param filename\tThe dictionary file\n\t * @return\n\t * @throws IOException\n\t */\n\tpublic static Map<String,String> loadTermDictionary(String filename ) throws IOException {\n\t\tMap<String,String> dictionary = new 
HashMap<>();\n\t\tfinal List<String> lines = FileUtils.linesFromFile(filename, \"UTF8\");\n\t\tfor (String l: lines) {\n\t\t\t//\t\tSystem.out.println(\"LINE:\" + l);\n\t\t\tif (l.startsWith(\"#\")) continue;\n\t\t\tString[] els = l.split(\"\\t\");\n\t\t\tString text = els[0];\n\t\t\tString type = els[1];\n\t\t\tdictionary.put(text,type);\n\t\t}\n\t\tlog.fine(\"Loaded \" + dictionary.size() + \" triggers from \" + filename);\n\t\treturn dictionary;\n\t}\n\t\n\t/**\n\t * Loads only dictionary items with specific types.\n\t * \n\t * @param filename\tThe dictionary file\n\t * @param types\t\tSemantic types to load\n\t * @return\n\t * @throws IOException\n\t */\n\tpublic static Map<String,String> loadTermDictionary(String filename, List<String> types ) throws IOException {\n\t\tMap<String,String> dictionary = new HashMap<>();\n\t\tfinal List<String> lines = FileUtils.linesFromFile(filename, \"UTF8\");\n\t\tfor (String l: lines) {\n\t\t\tif (l.startsWith(\"#\")) continue;\n\t\t\tString[] els = l.split(\"\\t\");\n\t\t\tString text = els[0];\n\t\t\tString type = els[1];\n\t\t\tif (types == null || types.contains(type)) \n\t\t\t\tdictionary.put(text,type);\n\t\t}\n\t\tlog.fine(\"Loaded \" + dictionary.size() + \" triggers from \" + filename);\n\treturn dictionary;\n}\n\t\n\t/**\n\t * Returns whether the surface unit is a citation mention.\n\t * \n\t * @param surf\tThe surface unit in question\n\t * @return\n\t */\n\tpublic static boolean isCitationMention(SurfaceElement surf) {\n\t\tLinkedHashSet<SemanticItem> cits = surf.filterSemanticsByClass(CitationMention.class);\n\t\treturn (cits != null && cits.size() > 0);\n\t}\n\t\n\t/** \n\t * Returns whether a citation mention should be treated as the current instance or not..\n\t * \n\t * @param mention\tThe citation mention in question\n\t * @param token\t\tThe citation token\n\t * @return THISCITATION if the citation mention is associated with the token, OTHERCITATION if not, \n\t * \t\t\t\t\tnull if token is not associated with any citation.\n\t */\n\tpublic static String getCitationString(CitationMention mention, SurfaceElement token) {\n\t\tif (mention.getSurfaceElement().equals(token)) return \"THISCITATION \";\n\t\telse if (isCitationMention(token)) return \"OTHERCITATION \";\n\t\treturn null;\n\t}\n\t\n\t/**\n\t * Loads results of rule-based method from a file. 
Useful for extracting ML features.\n\t * \n\t * @param filename\tThe file containing the rule-based method results\n\t * \n\t * @return\ta Map of IDs and citation sentiment values\n\t */\n\tpublic static Map<String,String> loadRuleBasedResultsFromFile(String filename) {\n\t\tMap<String,String> results = new HashMap<>();\n\t\ttry {\n\t\tList<String> lines = FileUtils.linesFromFile(filename, \"UTF-8\");\n\t\tfor (String l: lines) {\n\t\t\tString[] els = l.split(\"\\\\|\");\n\t\t\tif (els.length < 5) continue;\n\t\t\tString a = els[4];\n\t\t\tString[] els1 = a.split(\"_\");\n\t\t\tString id = els1[0];\n\t\t\tString cid = els1[1];\n//\t\t\truleBasedScores.put(id + \"_\" + cid, Double.parseDouble(els[2]));\n\t\t\tresults.put(id+ \"_\" + cid, els[0]);\n\t\t}\n\t\t} catch (IOException ioe) {\n\t\t\tlog.severe(\"Rule-based sentiment result file cannot be opened.\");\n\t\t}\n\t\treturn results;\n\t}\n\t\n/*\tpublic static LinkedHashSet<String> loadClinicalTermDictionary(String filename) throws IOException {\n\t\tList<String> termlist = new ArrayList<>();\n\t\tfinal List<String> lines = FileUtils.linesFromFile(filename, \"UTF8\");\n\t\tfor (String l: lines) {\n\t\t\tif (l.startsWith(\"#\")) continue;\n\t\t\ttermlist.add(l.trim());\n\t\t}\n\t\tCollections.sort(termlist,new Comparator<String>() {\n\t\t\tpublic int compare(String a, String b) {\n\t\t\t\tint al = a.length();\n\t\t\t\tint bl = b.length();\n\t\t\t\tif (al == bl) return a.compareTo(b);\n\t\t\t\treturn (bl-al);\n\t\t\t}\n\t\t});\n\t\treturn new LinkedHashSet<String>(termlist);\n\t}*/\n\t\n\n\t\n\n}\n" }, { "alpha_fraction": 0.8319088220596313, "alphanum_fraction": 0.8589743375778198, "avg_line_length": 38.05555725097656, "blob_id": "867e35a036b6fca9cc605dc6f9074f4598f0dab2", "content_id": "b3f22f644057d8526c0cc247e33bed7cc51aa505", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 702, "license_type": "no_license", "max_line_length": 76, "num_lines": 18, "path": "/citation.properties", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "sentenceSegmenter=gov.nih.nlm.ling.process.NewlineSentenceSegmenter\nannotators=tokenize,ssplit,pos,lemma\n\nsentimentAllDirectory=X:\\\\CitationSentiment\\\\XML_SIMPLE_W_CONTEXT_SM\nsentimentTrainDirectory=X:\\\\CitationSentiment\\\\XML_SIMPLE_W_CONTEXT_SM_TRAIN\nsentimentTestDirectory=X:\\\\CitationSentiment\\\\XML_SIMPLE_W_CONTEXT_SM_TEST\n\ntermDictionary=resources/term_285.dic\ntermDictionaryTrain=resources/term_228.dic\nscoreFile=resources/sentiment_scores_228.txt\nruleBasedResultFile=resources/rule_based_all_121818.txt\n\nfeatureDirectory=features\nfeatureFile=features/feature_list_4539.dic\nlogDirectory=log\n\nfoldDirectory:X:\\\\CitationSentiment\\\\TenFoldData\nfoldLogDirectory=X:\\\\CitationSentiment\\\\FoldLog_NN_Test" }, { "alpha_fraction": 0.7327738404273987, "alphanum_fraction": 0.7345406413078308, "avg_line_length": 36.733333587646484, "blob_id": "068fc71a73a1f9211b26cc94b0803ae3aa62e438", "content_id": "f3df2625e3a403f4efb49f835fdda79dae57acc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2264, "license_type": "no_license", "max_line_length": 107, "num_lines": 60, "path": "/src/gov/nih/nlm/citationsentiment/XMLCitationMentionReader.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\n\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\n\nimport 
gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.Sentence;\nimport gov.nih.nlm.ling.core.SpanList;\nimport gov.nih.nlm.ling.io.XMLEntityReader;\nimport gov.nih.nlm.ling.sem.SemanticItem;\nimport nu.xom.Element;\n\n/**\n * Class to read citation mention information from XML.\n * \n * @author Halil Kilicoglu\n *\n */\npublic class XMLCitationMentionReader extends XMLEntityReader {\n\tprivate static final Logger log = Logger.getLogger(XMLCitationMentionReader.class.getName());\n\t\n\tpublic XMLCitationMentionReader() {}\n\n\t@Override\n\tpublic SemanticItem read(Element element, Document doc) {\n\t\tString id = element.getAttributeValue(\"id\");\n\t\tString spStr = element.getAttributeValue(\"charOffset\");\n\t\tString headSpStr = element.getAttributeValue(\"headOffset\");\n\t\tString sentiment = element.getAttributeValue(\"sentiment\");\n\t\tString refType = element.getAttributeValue(\"refType\");\n\t\tString text = element.getAttributeValue(\"text\");\n\t\tSpanList sp = new SpanList(spStr);\n\t\tSpanList headSp = null;\n\t\tif (headSpStr != null) headSp = new SpanList(headSpStr);\n\t\tSentence sent = doc.getSubsumingSentence(sp.getSpans().get(0));\n\t\tif (sent == null) {\n\t\t\tlog.log(Level.WARNING,\"No sentence can be associated with the XML: {0}\", new Object[]{element.toXML()});\n\t\t\treturn null;\n\t\t}\n\t\tCitationFactory sif = (CitationFactory)doc.getSemanticItemFactory();\n\t\tCitationMention cm = sif.newCitationMention(doc, refType, sp, headSp, text);\n\t\tcm.setId(id);\n\t\tcm.setSentiment(CitationMention.Sentiment.valueOf(sentiment));\n\t\tsent.synchSurfaceElements(cm.getSurfaceElement());\n\t\tsent.synchEmbeddings();\n\t \tlog.log(Level.FINEST,\"Generated entity {0} with the head {1}. \", \n\t \t\t\tnew String[]{cm.toString(), cm.getSurfaceElement().getHead().toString()});\n\t String contextSpan = element.getAttributeValue(\"context\");\n\t if (contextSpan != null) {\n\t\t\t SpanList contextSp = new SpanList(contextSpan);\n\t\t\t cm.setContext(contextSp.getSpans());\n\t }\n\t String subjectMatterSpan = element.getAttributeValue(\"subjectMatter\");\n\t if (subjectMatterSpan != null) {\n\t\t\t SpanList smSp = new SpanList(subjectMatterSpan);\n\t\t\t cm.setSubjectMatter(smSp.getSpans());\n\t }\n\t \treturn cm;\n\t}\n}\n" }, { "alpha_fraction": 0.6021073460578918, "alphanum_fraction": 0.607124924659729, "avg_line_length": 29.619047164916992, "blob_id": "7a88b867217fab2c7aebd8e5a38751cd7e5b2e7d", "content_id": "da8de81db03d38d170342ea87799c9abf5c9ee2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ant Build System", "length_bytes": 1993, "license_type": "no_license", "max_line_length": 122, "num_lines": 63, "path": "/build.xml", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "<?xml version=\"1.0\"?>\r\n<project name=\"clinical-citation-sentiment\" default=\"main\" basedir=\".\">\r\n <!-- Sets variables which can later be used. 
-->\r\n <!-- The value of a property is accessed via ${} -->\r\n <property name=\"src.dir\" location=\"src\" />\r\n <property name=\"build.dir\" location=\"classes\" />\r\n <property name=\"dist.dir\" location=\"dist\" />\r\n <property name=\"docs.dir\" location=\"docs\" />\r\n\r\n\r\n <path id=\"class-path\">\r\n<!-- \t<pathelement location=\"classes\"/> -->\r\n <pathelement location=\"lib/bioscores-2.0.1.jar\"/>\r\n <pathelement location=\"lib/liblinear-1.7-with-deps.jar\"/>\t\r\n <pathelement location=\"lib/ml.jar\"/> \r\n <pathelement location=\"lib/util.jar\"/> \r\n <pathelement location=\"lib/xom.jar\"/>\r\n <pathelement location=\"lib/stanford-corenlp-3.3.1.jar\"/> \t\r\n </path>\r\n \t\r\n <!-- Deletes the existing build, docs and dist directory-->\r\n <target name=\"clean\">\r\n <delete dir=\"${build.dir}\" />\r\n <delete dir=\"${docs.dir}\" />\r\n <delete dir=\"${dist.dir}\" />\r\n </target>\r\n\r\n <!-- Creates the build, docs and dist directory-->\r\n <target name=\"makedir\" >\r\n <mkdir dir=\"${build.dir}\" />\r\n <mkdir dir=\"${docs.dir}\" />\r\n <mkdir dir=\"${dist.dir}\" />\r\n </target>\r\n\r\n <!-- Compiles the java code (including the usage of library for JUnit -->\r\n\t\r\n <target name=\"prepare\">\r\n\t<mkdir dir=\"${build.dir}\" />\r\n </target>\t\t\r\n\t\r\n\t\t\r\n <target name=\"compile\" depends=\"prepare\">\r\n\t\t<javac srcdir=\"${src.dir}\" destdir=\"${build.dir}\" debug=\"true\" includeantruntime=\"false\" debuglevel=\"lines,vars,source\">\r\n\t\t\t<classpath refid=\"class-path\"></classpath>\r\n\t\t</javac>\r\n </target>\t\r\n\t\r\n\t<target name=\"jar\" depends=\"compile\">\r\n\t\t<jar destfile=\"${dist.dir}/citationsentiment.jar\">\r\n\t\t\t<manifest>\r\n\t\t\t</manifest>\r\n\t\t\t<fileset dir=\"${build.dir}\" includes=\"gov/nih/nlm/citationsentiment/\" />\r\n\t\t</jar>\r\n\t\t\r\n\t </target>\t\r\n\r\n\r\n\t\r\n <target name=\"main\" depends=\"clean, makedir, compile, jar\">\r\n <description>Main target</description>\r\n </target>\r\n\r\n</project> \r\n" }, { "alpha_fraction": 0.6513645648956299, "alphanum_fraction": 0.6541637778282166, "avg_line_length": 26.129920959472656, "blob_id": "c07cdacc587ead6d3f34aab0dceeb2180ea317a2", "content_id": "22d777f2cf6dfa2e8b8959527fd4964c05e8eea1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7145, "license_type": "no_license", "max_line_length": 119, "num_lines": 254, "path": "/src/gov/nih/nlm/citationsentiment/CitationMention.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\r\n\r\nimport java.util.HashMap;\r\nimport java.util.LinkedHashSet;\r\nimport java.util.List;\r\nimport java.util.Map;\r\nimport java.util.Set;\r\n\r\nimport gov.nih.nlm.ling.core.Span;\r\nimport gov.nih.nlm.ling.core.SpanList;\r\nimport gov.nih.nlm.ling.core.SurfaceElement;\r\nimport gov.nih.nlm.ling.sem.AbstractTerm;\r\nimport gov.nih.nlm.ling.sem.Ontology;\r\nimport gov.nih.nlm.util.attr.Attributes;\r\nimport gov.nih.nlm.util.attr.HasAttributes;\r\nimport nu.xom.Attribute;\r\nimport nu.xom.Element;\r\n\r\n/**\r\n * A representation of a mention of a citation in text. A mention is associated with a context span (\r\n * in the simplest case, the span of the sentence it is in) and may consist of of several \r\n * references (e.g., <i>[1-2]</i>). 
\r\n * \r\n * @author Halil Kilicoglu\r\n *\r\n */\r\npublic class CitationMention extends AbstractTerm implements HasAttributes {\r\n\t\r\n\tpublic enum Sentiment {\r\n\t\tPOSITIVE, NEGATIVE, NEUTRAL, NONE;\r\n\t}\r\n\t\r\n//\tprivate Citation citation;\r\n\tprivate Sentiment sentiment;\r\n\tprivate Map<String,String> metaDataMap;\r\n\tprivate Attributes attrs;\r\n\tprivate List<Span> context;\r\n\tprivate List<Span> subjectMatter;\r\n\tprivate List<Span> simplifiedContext;\r\n\t\r\n\tpublic CitationMention(String id) {\r\n\t\tsuper(id);\r\n\t}\r\n\r\n\tpublic CitationMention(String id, String type, SpanList sp) {\r\n\t\tsuper(id, type, sp);\r\n\t\tsentiment = Sentiment.NONE;\r\n\t}\r\n\r\n\tpublic CitationMention(String id, String type, SpanList sp, SurfaceElement se) {\r\n\t\tsuper(id, type, sp, se);\r\n\t\tsentiment = Sentiment.NONE;\r\n\t}\r\n\r\n\tpublic CitationMention(String id, String type, SpanList sp, SpanList headSp, SurfaceElement se) {\r\n\t\tsuper(id, type, sp, headSp, se);\r\n\t\tsentiment = Sentiment.NONE;\r\n\t}\r\n\t\r\n\tpublic CitationMention(String id, String type, SpanList sp, SpanList headSp, SurfaceElement se, Sentiment sentiment) {\r\n\t\tthis(id, type, sp, headSp, se);\r\n\t\tthis.sentiment = sentiment;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic Set<String> getSemtypes() {\r\n\t\treturn getAllSemtypes();\r\n\t}\r\n\r\n\t@Override\r\n\tpublic LinkedHashSet<String> getAllSemtypes() {\r\n\t\tLinkedHashSet<String> sems = new LinkedHashSet<>();\r\n//\t\tsems.add(citation.getType());\r\n\t\tsems.add(\"CitationSentiment\");\r\n\t\treturn sems;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic String toShortString() {\r\n\t\t// TODO Auto-generated method stub\r\n\t\treturn null;\r\n\t}\r\n\t\r\n\t\r\n\tpublic void setAttrs(Attributes attrs) {\r\n\t\tthis.attrs = attrs;\r\n\t}\r\n\r\n\tpublic void setContext(List<Span> context) {\r\n\t\tthis.context = context;\r\n\t}\r\n\r\n\tpublic void setSubjectMatter(List<Span> subj) {\r\n\t\tthis.subjectMatter = subj;\r\n\t}\r\n\t\r\n\tpublic void setSimplifiedContext(List<Span> context) {\r\n\t\tthis.simplifiedContext = context;\r\n\t}\r\n\t\r\n\tpublic Attributes getAttrs() {\r\n\t\treturn attrs;\r\n\t}\r\n\r\n\tpublic List<Span> getContext() {\r\n\t\treturn context;\r\n\t}\r\n\t\r\n\tpublic List<Span> getSubjectMatter() {\r\n\t\treturn subjectMatter;\r\n\t}\r\n\t\r\n\tpublic List<Span> getSimplifiedContext() {\r\n\t\treturn simplifiedContext;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic Element toXml() {\r\n\t\tElement el = new Element(\"Term\");\r\n\t\tel.addAttribute(new Attribute(\"xml:space\", \r\n\t\t \"http://www.w3.org/XML/1998/namespace\", \"preserve\"));\r\n\t\tel.addAttribute(new Attribute(\"id\",id));\r\n\t\tel.addAttribute(new Attribute(\"type\",\"CitationMention\"));\r\n\t\tel.addAttribute(new Attribute(\"refType\",type));\r\n\t\tel.addAttribute(new Attribute(\"charOffset\",span.toString()));\r\n\t\tif (headSpan == null) {\r\n\t\t\tel.addAttribute(new Attribute(\"headOffset\",surfaceElement.getHead().getSpan().toString()));\r\n\t\t} else {\r\n\t\t\tel.addAttribute(new Attribute(\"headOffset\",headSpan.toString()));\r\n\t\t}\r\n//\t\tif (citation != null) \r\n//\t\t\tel.addAttribute(new Attribute(\"citation\",citation.getId()));\r\n\t\tif (sentiment != null) \r\n\t\t\tel.addAttribute(new Attribute(\"sentiment\",sentiment.toString()));\r\n\t\tif (context != null) {\r\n\t\t\tel.addAttribute(new Attribute(\"context\",new SpanList(context).toString()));\r\n\t\t}\r\n\t\tif (subjectMatter != null) {\r\n\t\t\tel.addAttribute(new 
Attribute(\"subjectMatter\",new SpanList(subjectMatter).toString()));\r\n\t\t}\r\n\t\tif (features != null) {\r\n\t\t\tfor (String s: features.keySet()) {\r\n\t\t\t\tel.addAttribute(new Attribute(s,features.get(s).toString()));\r\n\t\t\t}\r\n\t\t}\r\n\t\tel.addAttribute(new Attribute(\"text\",getText()));\r\n\t\t\r\n\t\treturn el;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic Ontology getOntology() {\r\n//\t\treturn getReference();\r\n\t\treturn null;\r\n\t}\r\n\r\n\t@Override\r\n\tpublic boolean ontologyEquals(Object obj) {\r\n\t\tif (obj instanceof CitationMention == false) return false;\r\n\t\tCitationMention ment = (CitationMention)obj;\r\n\t\treturn (ment.getDocument().getId().equals(getDocument().getId()) && ment.getId().equals(getId()));\r\n//\t\tCitation cit = ment.getCitation();\r\n//\t\treturn (cit.getReference().equals(getReference()));\r\n\t}\r\n\t\r\n\tpublic Reference getReference() {\r\n//\t\treturn citation.getReference();\r\n\t\treturn null;\r\n\t}\r\n\t\r\n/*\tpublic Citation getCitation() {\r\n\t\treturn citation;\r\n\t}\r\n\t\r\n\tpublic void setCitation(Citation citation) {\r\n\t\tthis.citation = citation;\r\n\t}*/\r\n\t\r\n\tpublic Sentiment getSentiment() {\r\n\t\treturn sentiment;\r\n\t}\r\n\r\n\tpublic void setSentiment(Sentiment sentiment) {\r\n\t\tthis.sentiment = sentiment;\r\n\t}\r\n\t\r\n\t@Override\r\n\tpublic int hashCode() {\r\n\t\treturn \r\n\t ((id == null ? 89 : id.hashCode()) ^\r\n\t (type == null ? 97 : type.hashCode()) ^\r\n\t (getText() == null ? 103: getText().hashCode()) ^\r\n\t (span == null ? 119 : span.hashCode())); // ^\r\n//\t (citation == null ? 139: citation.hashCode()));\r\n\t}\r\n\t\r\n\t/**\r\n\t * Equality on the basis of type and mention equality.\r\n\t */\r\n\t@Override\r\n\tpublic boolean equals(Object obj) {\r\n\t\tif (obj == null) return false;\r\n\t\tif (this == obj) return true;\r\n\t\tif (getClass() != obj.getClass()) return false;\r\n\t\tCitationMention at = (CitationMention)obj;\r\n\t\treturn (id.equals(at.getId()) &&\r\n\t\t\t\ttype.equals(at.getType()) &&\r\n\t\t\t\tgetText().equals(at.getText()) &&\r\n\t\t\t\tspan.equals(at.getSpan())); // &&\r\n//\t\t\t\tcitation.equals(at.getCitation());\r\n\t}\r\n\t\r\n\tpublic Map<String, String> getMetaDataMap() {\r\n\t\treturn metaDataMap;\r\n\t}\r\n\tpublic void setMetaDataMap(Map<String, String> metadataMap) {\r\n\t\tthis.metaDataMap = metadataMap;\r\n\t}\r\n\t\r\n\tpublic void setMetaData(String key, String value) {\r\n\t\tif (metaDataMap == null) metaDataMap = new HashMap<String,String>();\r\n\t\tmetaDataMap.put(key, value);\r\n\t}\r\n\t\r\n\tpublic String getMetaData(String key) {\r\n\t\treturn metaDataMap.get(key);\r\n\t}\r\n\t\r\n\t public Attributes getAttributes() {\r\n\t\t if (attrs == null) {\r\n\t\t attrs = new Attributes();\r\n\t\t }\r\n\t\t return attrs;\r\n\t\t }\r\n\t \r\n\t public String toString() {\r\n\t\t StringBuffer buf = new StringBuffer();\r\n//\t\t buf.append(getDocument().getId() + \"_\" + getId() + \"_\" + getText() + \"_\" + sentiment.toString() + \"\\n\");\r\n\t\t buf.append(getDocument().getId() + \"_\" + getId() + \"_\" + getText() + \"_\" + sentiment.toString()+\"_\");\r\n\t\t if (context == null) return buf.toString();\r\n\t\t for (Span s: context) {\r\n\t\t\t buf.append(\" \" + getDocument().getStringInSpan(s));\r\n\t\t }\r\n/*\t\t if (subjectMatter == null) return buf.toString();\r\n\t\t for (Span s: subjectMatter) {\r\n\t\t\t buf.append(\" \" + getDocument().getStringInSpan(s));\r\n\t\t }*/\r\n/*\t\t for (Span s: context) {\r\n\t\t\t 
buf.append(\"\\tCONTEXT:\" + getDocument().getStringInSpan(s) + \"\\n\");\r\n\t\t }*/\r\n\t\t return buf.toString();\r\n\t }\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7409682273864746, "alphanum_fraction": 0.7658959627151489, "avg_line_length": 40.2835807800293, "blob_id": "0d84eef7e8cecf76024d09c18f87fbb9ea657969", "content_id": "b03fcd88d958a112a38114ecd92d475b21fcabad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2768, "license_type": "no_license", "max_line_length": 261, "num_lines": 67, "path": "/README.md", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "# clinical-citation-sentiment\nThis repository contains source code and models related to the publication:\n- Kilicoglu H, Peng Z, Tafreshi S, Tran T, Rosemblat G, Schneider J. *Confirm or refute?: A comparative study on citation sentiment classification in clinical research publications*. Journal of biomedical informatics. 2019;91:103123. (<https://doi.org/10.1016/j.jbi.2019.103123>) \n\n## Prerequisites\n\n- Java 1.8\n- Python 2.7\n- TensorFlow 1.10.1\n\nTo run python scripts, you will need:\n- numpy 1.14.5\n- sklearn 0.20.0\n- cPickle 1.71\n- gensim 3.6.0\n\n## Directory Structure\n\n`src` directory contains Java code related to the rule-based method as well as generation of hand-crafted features for the neural network (NN) model.\n\n`scripts` directory contains Python scripts for predicting citation sentiment using the best NN model, as well as a script for rule-based prediction.\n\n`lib` directory contains third-party libraries required by the system (see Note regarding Stanford Core NLP below.)\n\n`dist` directory contains the JAR file.\n\n`resources` directory contains rule-based method evaluation results as well as dictionaries used.\n\n`best_model` directory contains the best NN model.\n\n`data` directory contains all data files that are needed for running the Python scripts. (You need to download the compressed data file from <https://uofi.box.com/s/mffn2h6a0vh24futhynmoleelyjkcze9> and unzip it in top level directory)\n\nThe top level directory contains properties file used by the package, as well as test files.\n\n\n## Usage\n\nAll Python scripts should be run in the top level directory.\n- Generalization test replicates the experiment with the best model on the held-out test set, reported in Table 7 of the paper\n (Accuracy=0.882, MacroF1=0.721). \n \n```\n python scripts/generalization_test.py\n```\n\n- Prediction script performs citation sentiment analysis on a document. The document needs to be one sentence per line with citations marked (see `text.txt` file for an example). The input arguments are the path of input and output file path.\n \n``` \n python scripts/predict.py test.txt nn_predict_results.txt\n```\n\nPrediction can be also be performed with the rule-based method, which has overall a lower performance. The input and output arguments are the same as above. The output should match `test.out`. \n\n``` \n scripts/ruleBasedPrediction.sh test.txt rule_predict_results.txt\n```\n\n\n## Note on Stanford CoreNLP package\n\nStanford CoreNLP model jar file that is needed for processing raw text for lexical and syntactic information (`stanford-corenlp-3.3.1-models.jar`) is\nnot included with the distribution due to its size. 
It can be downloaded from <http://stanfordnlp.github.io/CoreNLP/> and copied to lib directory.\n\n\n## Contact\n\n- Halil Kilicoglu: [[email protected]](mailto:[email protected])\n\n\n" }, { "alpha_fraction": 0.7216590046882629, "alphanum_fraction": 0.7238709926605225, "avg_line_length": 31.878787994384766, "blob_id": "354ebce109af38ab8dd09010ecab48477c68adc1", "content_id": "cb68a8224ab0c95cb13fc7b7c6ad1bbf0fa81098", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5425, "license_type": "no_license", "max_line_length": 124, "num_lines": 165, "path": "/src/gov/nih/nlm/citationsentiment/StringTermAnnotator.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\n\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.Comparator;\nimport java.util.HashMap;\nimport java.util.LinkedHashMap;\nimport java.util.LinkedHashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.logging.Level;\nimport java.util.logging.Logger;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.MultiWord;\nimport gov.nih.nlm.ling.core.SpanList;\nimport gov.nih.nlm.ling.core.Word;\nimport gov.nih.nlm.ling.process.TermAnnotator;\nimport gov.nih.nlm.ling.sem.Concept;\nimport gov.nih.nlm.ling.sem.Ontology;\nimport gov.nih.nlm.ling.sem.SemanticItemFactory;\n\n/**\n * An annotator for strings from a dictionary.\n * \n * @author Halil Kilicoglu\n *\n */\npublic class StringTermAnnotator implements TermAnnotator {\n\tprivate static Logger log = Logger.getLogger(StringTermAnnotator.class.getName());\t\n\t\n\tpublic static final Comparator<String> LENGTH_ORDER = \n\t new Comparator<String>() {\n\t\t\tpublic int compare(String s1, String s2) {\n\t\t\t\tif (s1.length() > s2.length()) return -1;\n\t\t\t\telse if (s2.length() > s1.length()) return 1;\n\t\t\t\treturn s1.toLowerCase().compareTo(s2.toLowerCase());\n\t\t\t}\t\n\t\t};\n\t\t\n\tprivate Map<String,String> dictionaryItems;\n\tprivate boolean allowMultipleAnnotations = true;\n\tprivate boolean postHyphenMatch = true;\n\t\t\n\tpublic StringTermAnnotator() {}\n\t\n\t/**\n\t * Constructs an <code>StringTermAnnotator</code> with a list of simple dictionary terms\n\t * \n\t * @param indicators\tthe set of indicators\n\t */\n\tpublic StringTermAnnotator(Map<String,String> dictionaryItems) {\n\t\tthis.dictionaryItems = dictionaryItems;\n\t}\n\t\n\tpublic Map<String,String> getDictionaryItems() {\n\t\treturn dictionaryItems;\n\t}\n\n\tpublic void setDictionaryItems(Map<String,String> dictionaryItems) {\n\t\tthis.dictionaryItems = dictionaryItems;\n\t}\n\t\n\t/**\n\t * Returns whether multiple indicator annotations are allowed over the same text.\n\t * \n\t * @return true if this variable has been set.\n\t */\n\tpublic boolean allowMultipleAnnotations() {\n\t\treturn allowMultipleAnnotations;\n\t}\n\n\tpublic void setAllowMultipleAnnotations(boolean allowMultipleAnnotations) {\n\t\tthis.allowMultipleAnnotations = allowMultipleAnnotations;\n\t}\n\t\n\t/**\n\t * Returns whether, if the token is hyphenated, the post-hyphen substring is allowed to be an indicator\n\t * \n\t * @return true if this variable has been set.\n\t */\n\tpublic boolean postHyphenMatch() {\n\t\treturn postHyphenMatch;\n\t}\n\n\tpublic void setPostHyphenMatch(boolean postHyphenMatch) {\n\t\tthis.postHyphenMatch = 
postHyphenMatch;\n\t}\n\n\t@Override \n\tpublic void annotate(Document document, Properties props,\n\t\t\tMap<SpanList,LinkedHashSet<Ontology>> annotations) {\n\t\tif (dictionaryItems == null)\n\t\t\tthrow new IllegalStateException(\"No dictionary terms have been loaded for annotation.\");\n\t\tMap<String,String> termMap = new HashMap<>(dictionaryItems);\n\t\t// Annotate the larger indicators first\n\t\tList<String> termList = new ArrayList<>(termMap.keySet());\n\t\tCollections.sort(termList,LENGTH_ORDER);\n\t\tfor (String term: termList) {\n\t\t\tlog.log(Level.FINE,\"Annotating term: {0}\", new Object[]{term.toString()});\n\t\t\tannotateTerm(document,term,termMap.get(term),annotations);\n\t\t}\n\t}\n\n\t/**\n\t * Annotates a given <code>Document</code> with the loaded indicators and \n\t * creates the corresponding <code>Predicate</code> objects for the mentions, as well.\n\t * \n\t * @param document\tthe document to annotate\n\t * @param props\t\tthe properties to use for annotation\n\t */\n\tpublic void annotateTerms(Document document, Properties props) {\n\t\tMap<SpanList,LinkedHashSet<Ontology>> map = new LinkedHashMap<>();\n\t\tannotate(document,props,map);\n\t\tSemanticItemFactory sif = document.getSemanticItemFactory();\n\t\tfor (SpanList sp: map.keySet()) {\n\t\t\tList<Word> words = document.getWordsInSpan(sp);\n\t\t\tSpanList headSpan = null;\n\t\t\tif (words.size() > 1) \n\t\t\t\theadSpan = MultiWord.findHeadFromCategory(words).getSpan();\n\t\t\telse \n\t\t\t\theadSpan = sp;\n\t\t\tLinkedHashSet<Ontology> inds = map.get(sp);\n\t\t\tfor (Ontology ont: inds) {\n\t\t\t\tConcept conc = (Concept)ont;\n\t\t\t\tif (conc == null) {\n\t\t\t\t\tsif.newEntity(document, sp, headSpan,null);\n\t\t\t\t} else {\n\t\t\t\t\tString type = conc.getSemtypes().iterator().next();\n\t\t\t\t\tLinkedHashSet<Concept> concs = new LinkedHashSet<Concept>();\n\t\t\t\t\tconcs.add(conc);\n\t\t\t\t\tsif.newEntity(document, sp, headSpan, type, concs, conc);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t\n\t/**\n\t * Annotates terms specified as simple lowercase strings. 
If the word being examined is hyphenated, it attempts\n\t * to annotate the indicator on part of the word, as well.\n\t * \n\t * @param document \tthe document\n\t * @param term\t\t\t\tterm to annotate\n\t * \t@param term\t\t\t\tterm type\n\t * @param annotations\tthe updated annotations list\n\t */\n\tpublic void annotateTerm(Document document, String term, String type, Map<SpanList, LinkedHashSet<Ontology>> annotations) {\n\t\tif (document.getText() == null) return;\n\t\tPattern p = Pattern.compile(\"\\\\b\" + Pattern.quote(term.toLowerCase()) + \"(\\\\b|\\\\p{Punct})\");\n\t\tString lw = document.getText().toLowerCase();\n\t\tMatcher m = p.matcher(lw);\n\t\tLinkedHashSet<String> types = new LinkedHashSet<>();\n\t\ttypes.add(type);\n\t\twhile (m.find()) {\n\t\t\tSpanList sp = new SpanList(m.start(),m.end());\n\t\t\tConcept conc = new Concept(\"\",term,types);\n\t\t\tLinkedHashSet<Ontology> concs = new LinkedHashSet<Ontology>();\n\t\t\tconcs.add(conc);\n\t\t\tannotations.put(sp, concs);\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.5980024933815002, "alphanum_fraction": 0.6074906587600708, "avg_line_length": 36.42990493774414, "blob_id": "276bda10ab99a91dd261a38c292042528afcd3ef", "content_id": "2e8b2aee26cbdb4b96354af5e43b100b952c5276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4005, "license_type": "no_license", "max_line_length": 132, "num_lines": 107, "path": "/scripts/predict.py", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "# this is used for predicting citation sentiments for new dataset\n#\n# Usage: python predict.py [input_file_path] [output_file_path]\n#\n# Input file requirements:\n# One sentence per line\n# Citations must be tagged with <cit> and </cit> where they are occurred.\n# The number of words in the sentence should be less or equal to 166(based on our training set), any longer string will be trimmed\n#\n# Python package requirements:\n# python 2.7\n# tensorflow\n# sklearn\n#\nimport sys\nimport numpy as np\nfrom sklearn.decomposition import PCA\nimport tensorflow as tf\nfrom subprocess import *\nimport cPickle\n\ndef main():\n if len(sys.argv) != 3:\n print \"Usage: python predict.py [input_file_path] [output_file_path]\"\n return\n input_file_path = sys.argv[1]\n result_file_path = sys.argv[2]\n\n internal_file_path = 'data/preprocessing.txt'\n word_vocab_path = 'data/word_vocab.p'\n pos_vocab_path = 'data/pos_vocab.p'\n seqlen = 166\n\n # Preprocess the input text to generate necessary inputs for neural network\n process = Popen(['java', '-jar', 'data/genfeas.jar', input_file_path, internal_file_path], stdout=PIPE, stderr=PIPE)\n stdout, stderr = process.communicate()\n process.wait()\n\n results = open(\"results.txt\", 'wb')\n cit_tags, word_idx, pos_idx, add_features = parse_results(internal_file_path, word_vocab_path, pos_vocab_path, seqlen)\n mean = cPickle.load(open('data/mean.p', 'rb'))\n transform_matrix = cPickle.load(open('data/transform_matrix.p', 'rb'))\n add_features = [map(lambda x: float(x),feature) for feature in add_features]\n add_features = np.subtract(add_features, mean)\n reduced_features = np.dot(add_features, transform_matrix)\n\n sess = tf.Session()\n saver = tf.train.import_meta_graph('best_model/best_model.ckpt.meta')\n saver.restore(sess, 'best_model/best_model.ckpt')\n graph = tf.get_default_graph()\n y_pred = graph.get_tensor_by_name('y_pred:0')\n dropout_keep = graph.get_tensor_by_name('dropout_keep:0')\n x_input = 
graph.get_tensor_by_name('x_input:0')\n x_pos_input = graph.get_tensor_by_name('x_pos_input:0')\n mlf_input = graph.get_tensor_by_name('mlf_input:0')\n feed_dict = {dropout_keep: 1, x_input: word_idx, x_pos_input: pos_idx,\n mlf_input: reduced_features}\n y_pred = sess.run(y_pred, feed_dict)\n labels = ['NEGATIVE', 'NEUTRAL','POSITIVE']\n y_out = [ labels[z] for z in np.argmax(y_pred,axis=-1) ]\n write_result(result_file_path, y_out, cit_tags)\n\ndef parse_results(path, word, pos, seqlen):\n word_vocab = cPickle.load(open(word, 'rb'))\n pos_vocab = cPickle.load(open(pos, 'rb'))\n f = open(path, 'rb')\n cit_tags = []\n word_idx = []\n pos_idx = []\n feas = []\n for line in f:\n if line and line.strip():\n tokens = line.split(\"|\")\n cit_tag = tokens[0]\n words = tokens[1].split(\" \")\n if len(words) > 166:\n words = words[0:166]\n pos = tokens[2].split(\" \")\n if len(pos) > 166:\n pos = pos[0:166]\n features = tokens[3].split(\",\")\n\n words, pos = apply_unks(words, pos, word_vocab, pos_vocab)\n cit_tags.append(cit_tag)\n widx = ([ word_vocab.index(z) for z in words ]\n + [0] * (seqlen - len(words)))\n word_idx.append(widx)\n pidx = ([ pos_vocab.index(z) for z in pos ]\n + [0] * (seqlen - len(pos)))\n pos_idx.append(pidx)\n feas.append(features)\n\n return cit_tags, word_idx, pos_idx, feas\n\ndef write_result(path, y_out, tags):\n f = open(path, 'wb')\n for i in range(len(y_out)):\n f.write(tags[i] + \" \" + y_out[i] + \"\\n\")\n f.close()\n\ndef apply_unks(words, pos, word_vocab, pos_vocab):\n new_words = [ w if w in word_vocab else 'UNK' for w in words ]\n new_pos = [ p if p in pos_vocab else 'UNK' for p in pos ]\n return new_words, new_pos\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6416568756103516, "alphanum_fraction": 0.6443923115730286, "avg_line_length": 33.70697784423828, "blob_id": "7ef0ee87a301a64041e52c1a20482144197bae86", "content_id": "48ae386f8224e568b000e75ff033876a535fe72c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7677, "license_type": "no_license", "max_line_length": 126, "num_lines": 215, "path": "/src/gov/nih/nlm/citationsentiment/Reference.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\r\n\r\nimport java.util.ArrayList;\r\nimport java.util.List;\r\n\r\nimport org.w3c.dom.NamedNodeMap;\r\nimport org.w3c.dom.Node;\r\nimport org.w3c.dom.NodeList;\r\n\r\nimport gov.nih.nlm.ling.sem.Ontology;\r\nimport nu.xom.Attribute;\r\nimport nu.xom.Element;\r\nimport nu.xom.Elements;\r\n\r\n/**\r\n * A representation of an article reference. 
\r\n * \r\n * @author Halil Kilicoglu\r\n *\r\n */\r\npublic class Reference implements Ontology {\r\n\r\n\tprivate String type;\r\n\tprivate String title;\r\n\tprivate String source;\r\n\tprivate String year;\r\n\tprivate String volume;\r\n\tprivate String firstPage;\r\n\tprivate String lastPage;\r\n\tprivate String pmcId;\r\n\tprivate String pubmedId;\r\n\tprivate String doi;\r\n\tprivate List<String> authors;\r\n\t\r\n\tpublic Reference() {\r\n\t\t// TODO Auto-generated constructor stub\r\n\t}\r\n\t\r\n\tpublic Reference(Element el) {\r\n\t\tif (el.getLocalName().equals(\"citation\") == false && el.getLocalName().equals(\"element-citation\") == false && \r\n\t\t\t\tel.getLocalName().equals(\"mixed-citation\") == false) \r\n\t\t\tthrow new IllegalArgumentException(\"Not a citation XML element.\");\r\n\t\ttype = el.getAttributeValue(\"citation-type\");\r\n\t\tif (type == null) \r\n\t\t\ttype = el.getAttributeValue(\"publication-type\");\r\n\t\ttitle = el.getFirstChildElement(\"article-title\").getValue();\r\n\t\tsource = el.getFirstChildElement(\"source\").getValue();\r\n\t\tyear = el.getFirstChildElement(\"year\").getValue();\r\n\t\tvolume = el.getFirstChildElement(\"volume\").getValue();\r\n\t\tfirstPage = el.getFirstChildElement(\"fpage\").getValue();\r\n\t\tlastPage = el.getFirstChildElement(\"lpage\").getValue();\r\n\t\tElements ids = el.getChildElements(\"pub-id\");\r\n\t\tfor (int i=0; i < ids.size(); i++) {\r\n\t\t\tElement idel = ids.get(i);\r\n\t\t\tString t = idel.getAttributeValue(\"pub-id-type\");\r\n\t\t\tif (t.equals(\"pmid\")) pubmedId = idel.getValue();\r\n\t\t\telse if (t.equals(\"doi\")) doi = idel.getValue();\r\n\t\t\telse if (t.equals(\"pmc\")) pmcId = idel.getValue();\r\n\t\t}\r\n\t\tElements authorsEl = el.getChildElements(\"person-group\");\r\n\t\tauthors = new ArrayList<>();\r\n\t\tfor (int i=0; i < authorsEl.size(); i++) {\r\n\t\t\tElement author = authorsEl.get(i);\r\n\t\t\tString avalue = author.getAttributeValue(\"person-group-type\");\r\n\t\t\tif (avalue == null || avalue.equals(\"author\")) {\r\n\t\t\t\tElements names = author.getChildElements(\"name\");\r\n\t\t\t\tfor (int j=0; j < names.size(); j++) {\r\n\t\t\t\t\tElement name = names.get(i);\r\n\t\t\t\t\tString surname = name.getChildElements(\"surname\").get(0).getValue();\r\n\t\t\t\t\tString given = name.getChildElements(\"given-names\").get(0).getValue();\r\n\t\t\t\t\tString fullname = surname + \", \" + given;\r\n\t\t\t\t\tauthors.add(fullname);\r\n\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t\r\n\tpublic Reference(Node el) {\r\n\t\tString name = el.getNodeName();\r\n\t\tif (name.equals(\"citation\") == false && name.equals(\"element-citation\") == false && name.equals(\"mixed-citation\") == false) \r\n\t\t\tthrow new IllegalArgumentException(\"Not a citation XML element.\");\r\n NamedNodeMap refAttributes = el.getAttributes();\r\n if (refAttributes.getNamedItem(\"citation-type\") == null) {\r\n\t\t\ttype = refAttributes.getNamedItem(\"publication-type\").getNodeValue();\r\n } else\r\n \ttype = refAttributes.getNamedItem(\"citation-type\").getNodeValue();\r\n\r\n\t\tNodeList children = el.getChildNodes();\r\n\t\tfor (int l=0; l < children.getLength(); l++){\r\n\t\t\tNode child = children.item(l);\r\n\t\t\tNamedNodeMap childAtts = child.getAttributes(); \r\n\t\t\tif (child.getNodeName().equals(\"article-title\")) title = child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"source\")) source = child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"year\")) year = 
child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"volume\")) volume = child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"fpage\")) firstPage = child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"lpage\")) lastPage = child.getTextContent();\r\n\t\t\telse if (child.getNodeName().equals(\"pub-id\")) {\r\n\t\t\t\tString t = childAtts.getNamedItem(\"pub-id-type\").getNodeValue();\r\n\t\t\t\tif (t.equals(\"pmid\")) pubmedId = child.getTextContent();\r\n\t\t\t\telse if (t.equals(\"doi\")) doi = child.getTextContent();\r\n\t\t\t\telse if (t.equals(\"pmc\")) pmcId = child.getTextContent();\r\n\t\t\t} \r\n\t\t\telse if (child.getNodeName().equals(\"person-group\")) {\r\n\t\t\t\tNode persontype = childAtts.getNamedItem(\"person-group-type\");\r\n\t\t\t\tif (persontype == null || persontype.getNodeValue().equals(\"author\")) {\r\n\t\t\t\t\tauthors = new ArrayList<>();\r\n\t\t\t\t\tNodeList names = child.getChildNodes();\r\n\t\t\t\t\tfor (int j=0; j < names.getLength(); j++) {\r\n\t\t\t\t\t\tNode namej = names.item(j);\t\r\n\t\t\t\t\t\tNodeList subnames = namej.getChildNodes();\r\n\t\t\t\t\t\tString surname = \"\";\r\n\t\t\t\t\t\tString given = \"\";\r\n\t\t\t\t\t\tfor (int k=0; k < subnames.getLength(); k++) {\r\n\t\t\t\t\t\t\tNode sname = subnames.item(k);\r\n\t\t\t\t\t\t\tif (sname.getNodeName().equals(\"surname\")) surname = sname.getTextContent();\r\n\t\t\t\t\t\t\tif (sname.getNodeName().equals(\"given-names\")) given = sname.getTextContent();\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tStringBuffer buf = new StringBuffer();\r\n\t\t\t\t\t\tif (surname.equals(\"\") == false) {\r\n\t\t\t\t\t\t\tbuf.append(surname);\r\n\t\t\t\t\t\t\tbuf.append(\",\");\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\tif (given.equals(\"\") == false) buf.append(given);\r\n\t\t\t\t\t\tString fullname = buf.toString();\r\n\t\t\t\t\t\tif (fullname.equals(\"\") == false) authors.add(fullname);\r\n\t\t\t\t\t}\r\n\t\t\t\t}\r\n\t\t\t} \r\n\t\t}\r\n\t}\r\n\r\n\tpublic String getType() {\r\n\t\treturn type;\r\n\t}\r\n\r\n\tpublic String getTitle() {\r\n\t\treturn title;\r\n\t}\r\n\r\n\tpublic String getSource() {\r\n\t\treturn source;\r\n\t}\r\n\r\n\tpublic String getYear() {\r\n\t\treturn year;\r\n\t}\r\n\r\n\tpublic String getVolume() {\r\n\t\treturn volume;\r\n\t}\r\n\r\n\tpublic String getFirstPage() {\r\n\t\treturn firstPage;\r\n\t}\r\n\r\n\tpublic String getLastPage() {\r\n\t\treturn lastPage;\r\n\t}\r\n\r\n\tpublic String getPmcId() {\r\n\t\treturn pmcId;\r\n\t}\r\n\r\n\tpublic String getPubmedId() {\r\n\t\treturn pubmedId;\r\n\t}\r\n\r\n\tpublic String getDoi() {\r\n\t\treturn doi;\r\n\t}\r\n\r\n\tpublic List<String> getAuthors() {\r\n\t\treturn authors;\r\n\t}\r\n\t\r\n\tpublic boolean equals(Object obj) {\r\n\t\tif (obj == null) return false;\r\n\t\tif (this == obj) return true;\r\n\t\tif (getClass() != obj.getClass()) return false;\r\n\t\tReference ref = (Reference)obj;\r\n\t\tString refPubmedId = ref.getPubmedId();\r\n\t\tString refPmcId = ref.getPmcId();\r\n\t\tString refDoi = ref.getDoi();\r\n\t\treturn ((pmcId != null && refPmcId != null && pmcId.equals(refPmcId)) ||\r\n\t\t\t\t(pubmedId != null && refPubmedId != null && pubmedId.equals(refPubmedId)) ||\r\n\t\t\t\t(doi != null && refDoi != null && doi.equals(refDoi)));\r\n\t}\r\n\t\r\n\tpublic int hashCode() {\r\n\t\tif (pmcId != null) return pmcId.hashCode();\r\n\t\tif (pubmedId != null) return pubmedId.hashCode();\r\n\t\tif (doi != null) return doi.hashCode();\r\n\t return ((title == null ? 
119 : title.hashCode()) ^\r\n\t \t\t(source == null ? 89: source.hashCode()) ^\r\n\t \t\t(firstPage == null ? 59 : firstPage.hashCode()) ^\r\n\t \t\t(lastPage == null ? 79 : lastPage.hashCode()));\r\n\t}\r\n\t\r\n\tpublic Element toXml() {\r\n\t\tElement el = new Element(\"Reference\");\r\n\t\tif (pubmedId != null) el.addAttribute(new Attribute(\"pubmedId\",pubmedId));\r\n\t\tif (pmcId != null) el.addAttribute(new Attribute(\"pmcId\",pmcId));\r\n\t\tif (doi != null) el.addAttribute(new Attribute(\"doi\",doi));\r\n\t\tif (type != null) el.addAttribute(new Attribute(\"type\",type));\r\n\t\tif (title != null) el.addAttribute(new Attribute(\"title\",title));\r\n\t\tif (source != null) el.addAttribute(new Attribute(\"source\",source));\r\n\t\tif (year != null) el.addAttribute(new Attribute(\"year\",year));\r\n\t\tif (volume != null) el.addAttribute(new Attribute(\"volume\",volume));\r\n\t\tif (firstPage != null) el.addAttribute(new Attribute(\"firstPage\",firstPage));\r\n\t\tif (lastPage != null) el.addAttribute(new Attribute(\"lastPage\",lastPage));\r\n\t\tif (authors != null && authors.size() > 0) el.addAttribute(new Attribute(\"authors\",String.join(\";\", authors))); \r\n\t\treturn el;\r\n\t}\r\n}\r\n" }, { "alpha_fraction": 0.5682975053787231, "alphanum_fraction": 0.5823670625686646, "avg_line_length": 36.53950881958008, "blob_id": "b18bac994b6b7f11f39051d1939b4862df01d497", "content_id": "d20870f2995cba2bb11c664191deed4a55eca573", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14144, "license_type": "no_license", "max_line_length": 128, "num_lines": 367, "path": "/scripts/generalization_test.py", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "# this is used to test the generalizability of the method\r\n\r\nimport os, sys, time, random, re, json, collections\r\nimport sklearn.metrics\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport cPickle\r\nfrom gensim.models import KeyedVectors, Word2Vec\r\n\r\nNeuralModel = collections.namedtuple('NeuralModel', ['word_vocab', 'pos_vocab',\r\n 'labels', 'seqlen', 'sess', 'saver', 'dropout_keep', 'x_input', 'x_pos_input', 'mlf_input',\r\n 'x_length', 'y_true','y_out','y_pred', 'y_loss','train_step'])\r\n\r\ndef main():\r\n\r\n dep_word_embeddings_path = 'data/dbwe_dict.p'\r\n pos_embeddings_path = 'data/pos_embedding_model_on_additional_biotext_corpus.model'\r\n ensemble_count = 20\r\n num_epoch = 20\r\n batch_size = 16\r\n\r\n trainset = load_data('data/train_data.txt', 'data/train_pos.txt', 'data/train_label.p', 'data/train_feature.p')\r\n oritestset = load_data('data/test_data.txt', 'data/test_pos.txt', 'data/test_label.p', 'data/test_feature.p')\r\n\r\n label_dist, seqlen_mean, seqlen_max = data_stats(trainset)\r\n print(\"Label distribution> {}\".format(label_dist))\r\n print(\"Token length> Mean={} Max={}\".format(seqlen_mean,seqlen_max))\r\n\r\n word_vocab, pos_vocab = build_vocab(trainset)\r\n word_embeddings, pos_embeddings = load_embeddings(dep_word_embeddings_path, pos_embeddings_path, word_vocab, pos_vocab)\r\n\r\n print(\"Training word vocab> Length={} : {}..\".format(len(word_vocab),', '.join(word_vocab[:10])))\r\n trainset = apply_unks(trainset, word_vocab, pos_vocab)\r\n testset = apply_unks(oritestset, word_vocab, pos_vocab)\r\n\r\n print('@ Training set',len(trainset), ' @ Testing set', len(testset))\r\n label_dist, _, _ = data_stats(testset)\r\n print(\"Test fold distribution> {}\".format(label_dist))\r\n\r\n\r\n models = 
[]\r\n for j in range(ensemble_count):\r\n model = train_model(trainset, word_vocab, pos_vocab, word_embeddings, pos_embeddings, seqlen_max, num_epoch, batch_size)\r\n models.append(model)\r\n\r\n print(\"Test Evaluation> \")\r\n tf1, tp, tr, tacc, probs, max_micro, min_micro = eval_model(models, testset, avg='micro')\r\n print(\" Overall: micro-f1={:.2%} p={:.2%} r={:.2%} acc={:.2%}\".format(tf1,tp,tr,tacc))\r\n\r\n tf1, tp, tr, tacc, _, max_macro, min_macro = eval_model(models, testset)\r\n print(\" Overall: macro-f1={:.2%} p={:.2%} r={:.2%} acc={:.2%}\".format(tf1,tp,tr,tacc))\r\n\r\n tf1, tp, tr, _, _, _, _ = eval_model(models, testset,labels=['POSITIVE'])\r\n print(\" > Positive: f1={:.2%} p={:.2%} r={:.2%}\".format(tf1,tp,tr))\r\n\r\n tf1, tp, tr, _, _, _, _ = eval_model(models, testset,labels=['NEUTRAL'])\r\n print(\" > Neutral: f1={:.2%} p={:.2%} r={:.2%} \".format(tf1,tp,tr))\r\n\r\n tf1, tp, tr, _, _, _, _ = eval_model(models, testset,labels=['NEGATIVE'])\r\n print(\" > Negative: f1={:.2%} p={:.2%} r={:.2%}\".format(tf1,tp,tr))\r\n\r\n\r\n\r\n\r\ndef get_fold(i, n, inverse=False, seed=0):\r\n assert(i >= 0 and i < n)\r\n random.seed(seed)\r\n indices = random.sample(list(range(n)), n)\r\n ssize = int(n/10)\r\n if inverse:\r\n return indices[:i*ssize] + indices[(i+1)*ssize:]\r\n else:\r\n return indices[i*ssize:(i+1)*ssize]\r\n\r\ndef train_model(trainset, word_vocab, pos_vocab, word_embeddings, pos_embeddings, seqlen_max, num_epoch, batch_size):\r\n # Build the model\r\n labels = ['NEGATIVE', 'NEUTRAL','POSITIVE']\r\n model = new_model(word_vocab, pos_vocab, labels, seqlen_max,\r\n word_embeddings=word_embeddings, pos_embeddings=pos_embeddings)\r\n\r\n # Train the model\r\n dropout_keep = 0.5\r\n tmproot = 'tmp'\r\n\r\n # random.shuffle(trainset)\r\n devcount = int(len(trainset) * 0.20)\r\n trainsplit = trainset[:-devcount]\r\n devsplit = trainset[-devcount:]\r\n trainsample = random.sample(trainsplit,len(devsplit))\r\n\r\n print('@ Training split',len(trainsplit), ' @ Development split', len(devsplit))\r\n\r\n sess_id = int(time.time())\r\n best_df1 = -1\r\n best_model = None\r\n print('Num. 
Epochs: {}, Batch Size: {}'.format(num_epoch,batch_size))\r\n for ep in range(1,num_epoch+1):\r\n\r\n aloss = 0\r\n for i in range(0,len(trainsplit),batch_size):\r\n minibatch = trainsplit[i:i+batch_size]\r\n batch_feed = compile_examples(model, minibatch, keep_prob=dropout_keep)\r\n _, loss = model.sess.run([model.train_step,model.y_loss], batch_feed)\r\n aloss = aloss * 0.8 + loss * 0.2\r\n print(\"epoch {}> iter {}> {}/{} loss {} \"\r\n .format(ep, int(i/batch_size), i, len(trainsplit), aloss))\r\n\r\n bf1, _, _, _, _, _, _ = eval_model(model,trainsample,num_samples=1,sample_size=1)\r\n df1, dp, dr, _, _, _, _ = eval_model(model,devsplit,num_samples=1,sample_size=1)\r\n if df1 > best_df1:\r\n best_df1 = df1\r\n best_model = \"{}/model-{}-ep{}.ckpt\".format(tmproot,sess_id,ep)\r\n\r\n if not os.path.exists(tmproot):\r\n os.mkdir(tmproot)\r\n\r\n model.saver.save(model.sess, best_model)\r\n marker = '*'\r\n else:\r\n marker = ' '\r\n\r\n print(\"epoch {}> loss {} fit> f={:.2%} dev> f={:.2%} p={:.2%} r={:.2%} {}\"\r\n .format(ep, aloss, bf1, df1, dp, dr, marker))\r\n\r\n print(\"Restoring best model: {}\".format(best_model))\r\n model.saver.restore(model.sess, best_model)\r\n return model\r\n\r\ndef data_stats(dataset):\r\n label_freq = {}\r\n token_lengths = []\r\n\r\n for (tokens, mlfs, pos), y in dataset:\r\n if y in label_freq:\r\n label_freq[y] += 1\r\n else:\r\n label_freq[y] = 1\r\n\r\n token_lengths.append(len(tokens))\r\n\r\n return label_freq, np.mean(token_lengths), np.max(token_lengths)\r\n\r\ndef eval_model(models, examples, avg='macro', labels=None, num_samples=20, sample_size=10):\r\n assert(sample_size > 0 and sample_size <= len(models))\r\n if not isinstance(models,list):\r\n models = [models]\r\n\r\n if labels is None:\r\n target_labels = models[0].labels\r\n else:\r\n target_labels = labels\r\n\r\n y_out_store = []\r\n for model in models:\r\n feed_dict = compile_examples(model,examples)\r\n y_true, y_out_probs = model.sess.run([model.y_true, model.y_pred], feed_dict)\r\n y_out_store.append([y_true, y_out_probs])\r\n\r\n y_true = [ model.labels[z] for z in np.argmax(y_true,axis=-1) ]\r\n\r\n f1s = []\r\n precisions = []\r\n recalls = []\r\n accs = []\r\n\r\n for i in range(0, num_samples):\r\n y_out = np.zeros((len(examples),len(models[0].labels)))\r\n y_outs = random.sample(y_out_store, sample_size)\r\n for y_t, y_ in y_outs:\r\n y_out += y_\r\n\r\n y_pred = [ model.labels[z] for z in np.argmax(y_out,axis=-1) ]\r\n\r\n f1 = sklearn.metrics.f1_score(y_true, y_pred,\r\n average=avg, labels=target_labels)\r\n precision = sklearn.metrics.precision_score(y_true, y_pred,\r\n average=avg, labels=target_labels)\r\n recall = sklearn.metrics.recall_score(y_true, y_pred,\r\n average=avg, labels=target_labels)\r\n acc = sklearn.metrics.accuracy_score(y_true, y_pred)\r\n\r\n f1s.append(f1)\r\n precisions.append(precision)\r\n recalls.append(recall)\r\n accs.append(acc)\r\n\r\n return np.mean(f1s), np.mean(precisions), np.mean(recalls), np.mean(accs), y_out_store, max(f1s), min(f1s)\r\n\r\n\r\n\r\ndef apply_unks(dataset, word_vocab, pos_vocab):\r\n new_dataset = []\r\n for (tokens, mlfs, positions), y in dataset:\r\n tokens = [ w if w in word_vocab else 'UNK' for w in tokens ]\r\n positions = [ p if p in pos_vocab else 'UNK' for p in positions ]\r\n new_dataset.append(((tokens, mlfs, positions), y))\r\n\r\n return new_dataset\r\n\r\ndef build_vocab(dataset):\r\n word_freq = {}\r\n pos_vocab = []\r\n for (tokens, mlfs, pos), y in dataset:\r\n for w in tokens:\r\n if w not in 
word_freq:\r\n word_freq[w] = 1\r\n else:\r\n word_freq[w] += 1\r\n\r\n for p in pos:\r\n if p not in pos_vocab:\r\n pos_vocab.append(p)\r\n\r\n word_vocab = ['ZERO','UNK','THISCITATION','OTHERCITATION']\r\n word_vocab += sorted([ w for w, freq in word_freq.items()\r\n if freq >= 1 and w not in word_vocab ])\r\n pos_vocab += ['UNK']\r\n\r\n return word_vocab, pos_vocab\r\n\r\ndef compile_examples(model, examples, keep_prob=1):\r\n bx_length = []\r\n bx_input = []\r\n bx_pos_input = []\r\n by_true = []\r\n ml_features = []\r\n\r\n for (tokens, mlfs, positions), y in examples:\r\n bx_length.append(len(tokens))\r\n\r\n tidx = ([ model.word_vocab.index(z) for z in tokens ]\r\n + [0] * (model.seqlen - len(tokens)))\r\n bx_input.append(tidx)\r\n\r\n pidx = ([ model.pos_vocab.index(z) for z in positions ]\r\n + [0] * (model.seqlen - len(positions)))\r\n bx_pos_input.append(pidx)\r\n\r\n onehot = np.zeros(len(model.labels))\r\n onehot[model.labels.index(y)] = 1\r\n by_true.append(onehot)\r\n ml_features.append(mlfs)\r\n\r\n feed_dict = { model.x_input : np.array(bx_input),\r\n model.x_pos_input : np.array(bx_pos_input),\r\n model.mlf_input : np.array(ml_features),\r\n model.x_length : np.array(bx_length),\r\n model.y_true : np.array(by_true),\r\n model.dropout_keep : keep_prob }\r\n\r\n return feed_dict\r\n\r\ndef new_model(word_vocab, pos_vocab, labels, seqlen, bsize=None,\r\n word_embeddings=None, pos_embeddings=None, embedding_size=300,\r\n filter_sizes=[3,4,5], num_filters=200):\r\n tf.reset_default_graph()\r\n\r\n y_true = tf.placeholder(tf.float32, [bsize,len(labels)])\r\n x_length = tf.placeholder(tf.int32, [bsize])\r\n x_input = tf.placeholder(tf.int32, [bsize, seqlen])\r\n x_pos_input = tf.placeholder(tf.int32, [bsize, seqlen])\r\n dropout_keep = tf.placeholder(tf.float32, None)\r\n\r\n mlf_input = tf.placeholder(tf.float32, [bsize, 200])\r\n pos_embedding_size = 30\r\n\r\n w_input = tf.placeholder(tf.float32, [len(word_vocab), embedding_size])\r\n pw_input = tf.placeholder(tf.float32, [len(pos_vocab), pos_embedding_size])\r\n\r\n W_em = tf.Variable(tf.truncated_normal([len(word_vocab), embedding_size], stddev=0.15))\r\n embedding_init = W_em.assign(w_input)\r\n\r\n\r\n W_em_pos = tf.Variable(tf.truncated_normal([len(pos_vocab),\r\n pos_embedding_size], stddev=0.15))\r\n pos_em_init = W_em_pos.assign(pw_input)\r\n\r\n xw_input = tf.nn.embedding_lookup(W_em, x_input)\r\n xp_input = tf.nn.embedding_lookup(W_em_pos, x_pos_input)\r\n\r\n xwp_input = tf.expand_dims(tf.concat([xw_input,xp_input],axis=-1),axis=-1)\r\n input_width = embedding_size + pos_embedding_size\r\n\r\n pooled_outputs = []\r\n for i, filter_size in enumerate(filter_sizes):\r\n filter_shape = [filter_size, input_width, 1, num_filters]\r\n W_conv = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.15))\r\n b_conv = tf.Variable(tf.constant(0.1, shape=[num_filters]))\r\n conv = tf.nn.conv2d(xwp_input, W_conv, strides=[1, 1, 1, 1], padding=\"VALID\")\r\n\r\n h = tf.nn.relu(tf.nn.bias_add(conv, b_conv))\r\n pooled_outputs.append(tf.reduce_max(h,axis=1))\r\n pooled_outputs.append(tf.expand_dims(mlf_input,axis=1))\r\n\r\n num_filters_total = num_filters * len(filter_sizes)\r\n cnn_pool = tf.reshape(tf.concat(pooled_outputs,axis=-1), [-1, num_filters_total + 200])\r\n\r\n\r\n y_out = tf.layers.dense(tf.nn.dropout(cnn_pool, dropout_keep),len(labels))\r\n y_pred = tf.nn.softmax(y_out)\r\n\r\n #reg_loss = sum([ tf.nn.l2_loss(x) for x in tf.trainable_variables() if x != W_em ])\r\n y_loss = 
tf.losses.softmax_cross_entropy(y_true,y_out) #+ 0.005 * reg_loss\r\n train_step = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(y_loss)\r\n\r\n sess = tf.Session()\r\n sess.run(tf.global_variables_initializer())\r\n if word_embeddings is not None:\r\n sess.run(embedding_init, { w_input: word_embeddings })\r\n\r\n if pos_embeddings is not None:\r\n sess.run(pos_em_init, {pw_input: pos_embeddings})\r\n\r\n param_count = 0\r\n for v in tf.trainable_variables():\r\n param_count += np.prod([ int(dimsize) for dimsize in v.get_shape() ])\r\n\r\n print(\"Compiled model with {} variables and {} parameters\".format(\r\n len(tf.trainable_variables()),param_count))\r\n\r\n saver = tf.train.Saver(max_to_keep=100)\r\n\r\n return NeuralModel(word_vocab, pos_vocab, labels, seqlen, sess, saver, dropout_keep, x_input, x_pos_input, mlf_input,\r\n x_length, y_true, y_out, y_pred, y_loss, train_step)\r\n\r\ndef load_data(fname, pos_file, label_file, mlf_file):\r\n examples = []\r\n labels = cPickle.load(open(label_file, 'rb'))\r\n index = 0\r\n pos_data = []\r\n f = open(pos_file, 'rb')\r\n for line in f:\r\n pos_data.append(line)\r\n pos_data = np.array(pos_data)\r\n f.close()\r\n ml_features = cPickle.load(open(mlf_file, 'rb'))\r\n with open(fname,'r') as f:\r\n for line in f:\r\n label = labels[index]\r\n tokens = [ x.lower() if x not in ['THISCITATION', 'OTHERCITATION'] else x for x in line.split() ]\r\n pos = pos_data[index].split()\r\n examples.append(((tokens, ml_features[index,:], pos),label))\r\n index = index + 1\r\n\r\n return examples\r\n\r\ndef load_embeddings(word_embeddings_file_path, pos_model_file_path, vocab, pos_vocab, dim=300):\r\n shape = (len(vocab), dim)\r\n weight_matrix = np.random.uniform(-0.15, 0.15, shape).astype(np.float32)\r\n pos_shape = (len(pos_vocab), 30)\r\n pos_matrix = np.random.uniform(-0.15, 0.15, pos_shape).astype(np.float32)\r\n vecs = cPickle.load(open(word_embeddings_file_path, 'rb'))\r\n pos_em = Word2Vec.load(pos_model_file_path)\r\n pos_vecs = pos_em.wv\r\n\r\n for i in range(len(vocab)):\r\n if vocab[i] in vecs:\r\n weight_matrix[i,:] = vecs[vocab[i]]\r\n\r\n for i in range(len(pos_vocab)):\r\n if pos_vocab[i] in pos_vecs:\r\n pos_matrix[i,:] = pos_vecs[pos_vocab[i]]\r\n\r\n return weight_matrix, pos_matrix\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.6825114488601685, "alphanum_fraction": 0.6856576204299927, "avg_line_length": 33.40235137939453, "blob_id": "19cb1e08b1da9cc24d057fcd9a6aa166a9786681", "content_id": "84b60d3e84520854e2dfdb023e1d837a4d228173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 14621, "license_type": "no_license", "max_line_length": 140, "num_lines": 425, "path": "/src/gov/nih/nlm/citationsentiment/RuleBasedSentiment.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\n\nimport java.io.IOException;\nimport java.io.PrintWriter;\nimport java.util.ArrayList;\nimport java.util.Arrays;\nimport java.util.HashMap;\nimport java.util.LinkedHashMap;\nimport java.util.LinkedHashSet;\nimport java.util.List;\nimport java.util.Map;\nimport java.util.Properties;\nimport java.util.logging.Logger;\nimport java.util.regex.Matcher;\nimport java.util.regex.Pattern;\n\nimport edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;\nimport edu.stanford.nlp.pipeline.Annotation;\nimport edu.stanford.nlp.util.CoreMap;\nimport gov.nih.nlm.ling.core.ContiguousLexeme;\nimport 
gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.MultiWordLexeme;\nimport gov.nih.nlm.ling.core.Sentence;\nimport gov.nih.nlm.ling.core.Span;\nimport gov.nih.nlm.ling.core.SpanList;\nimport gov.nih.nlm.ling.core.SurfaceElement;\nimport gov.nih.nlm.ling.core.Word;\nimport gov.nih.nlm.ling.core.WordLexeme;\nimport gov.nih.nlm.ling.process.ComponentLoader;\nimport gov.nih.nlm.ling.process.SentenceSegmenter;\nimport gov.nih.nlm.ling.sem.Entity;\nimport gov.nih.nlm.ling.sem.Indicator;\nimport gov.nih.nlm.ling.sem.Predicate;\nimport gov.nih.nlm.ling.sem.SemanticItem;\nimport gov.nih.nlm.ling.sem.Sense;\nimport gov.nih.nlm.ling.sem.Term;\nimport gov.nih.nlm.ling.util.FileUtils;\nimport gov.nih.nlm.ling.wrappers.CoreNLPWrapper;\n\n/**\n * Class to compute citation sentiment.\n * \n * @author Halil Kilicoglu\n *\n */\n\npublic class RuleBasedSentiment {\n\tprivate static Logger log = Logger.getLogger(RuleBasedSentiment.class.getName());\t\n\n\tpublic static RuleBasedSentiment instance; \n\n\t// private static Map<Class<? extends SemanticItem>,List<String>> annTypes;\n\t// private static XMLReader xmlReader;\n\n\tprivate static Map<String,String> lexLines = new HashMap<>();\n\tprivate static Map<String,Double> sentimentScores = new HashMap<>();\n\tprivate static LinkedHashSet<Indicator> sentimentTerms = new LinkedHashSet<>();\n\tprivate static Map<String,String> negTerms = new HashMap<>();\n\n\tprivate static Pattern CIT_PATTERN = Pattern.compile(\"<cit id=\\\"(.+?)\\\">(.+?)<\\\\/cit>\");\n\n\t/**\n\t * Creates an instance and initializes the dictionaries. \n\t * \n\t * @param props Properties to define dictionary locations ,etc.\n\t * @return\n\t */\n\tpublic static RuleBasedSentiment getInstance(Properties props) {\n\t\tif (instance == null) {\n\t\t\tinstance = new RuleBasedSentiment(props);\n\t\t}\n\t\treturn instance;\n\t}\n\t\n\tprivate RuleBasedSentiment(Properties props) {\n\t\t//\t xmlReader = getXMLReader();\n\t\t//\t annTypes = Utils.getAnnotationTypes();\n\t\tthis(props, props.getProperty(\"scoreFile\"));\n\t}\n\t\n\tprivate RuleBasedSentiment(Properties props, String filename) {\n\t\t//\t xmlReader = getXMLReader();\n\t\t//\t annTypes = Utils.getAnnotationTypes();\n\t\tsentimentScores = loadSentimentScores(filename);\n\t\tsentimentTerms = loadSentimentTerms();\n\t\ttry {\n\t\t\tnegTerms = Utils.loadTermDictionary(props.getProperty(\"termDictionary\"),Arrays.asList(\"NEGATION\"));\n\t\t} catch (IOException ioe) {\n\t\t\tlog.severe(\"Unable to load negation terms from file \" + props.getProperty(\"termDictionary\") + \". The program may not work as expected.\");\n\t\t}\n\t}\n\n\t/*\tprivate XMLReader getXMLReader() {\n\t\tXMLReader reader = Utils.getXMLReader();\n\t\treader.addAnnotationReader(Predicate.class, new XMLPredicateReader());\n\t\treturn reader;\n\t}*/\n\n\t/**\n\t * Loads sentiment dictionary scores from a file. 
Returns a map keyed by the lemma list \n\t * of the sentiment expression and with its precomputed score as the value.\n\t * \n\t * @param filename\tThe name of the file.\n\t * @return\n\t */\n\tprivate Map<String,Double> loadSentimentScores(String filename) {\n\t\tMap<String,Double> scs = new HashMap<>();\n\t\ttry {\n\t\t\tList<String> lines = FileUtils.linesFromFile(filename, \"UTF-8\");\n\t\t\tfor (String l: lines) {\n\t\t\t\tString[] els = l.split(\"[\\\\t]\");\n\t\t\t\tString lex = els[3];\n\t\t\t\tif (els[2].equalsIgnoreCase(\"no\")) continue;\n\t\t\t\tdouble sc = Double.parseDouble(els[7]);\n\t\t\t\tif (sc == 0.0) continue;\n\t\t\t\tscs.put(lex, sc);\n\t\t\t\tlexLines.put(lex,l);\n\t\t\t}\n\t\t\tlog.info(\"Loaded \" + scs.size() + \" scored triggers.\");\n\n\t\t} catch (IOException ioe) {\n\t\t\tlog.severe(\"Unable to load scored triggers from \" + filename +\".. Will terminate.\");\n\t\t\tSystem.exit(1);\n\t\t}\n\t\treturn scs;\n\t}\n\n\tprivate LinkedHashSet<Indicator> loadSentimentTerms() {\n\t\tLinkedHashSet<Indicator> allIndicators = new LinkedHashSet<>();\n\t\tfor (String a: sentimentScores.keySet()) {\n\t\t\t//\t\t\tif (a.equals(\"NO_LEMMA\")) continue;\n\t\t\tString[] as = a.split(\"[ ]+\");\n\t\t\tString[] els = lexLines.get(a).split(\"\\\\t\");\n\t\t\tList<WordLexeme> lexemes = new ArrayList<>(as.length);\n\t\t\tfor (int i=0; i < as.length; i++) {\n\t\t\t\tString inds = as[i];\n\t\t\t\tString lemma = inds.substring(0, inds.indexOf(\"(\"));\n\t\t\t\tString cat = inds.substring(inds.indexOf(\"(\") +1,inds.indexOf(\")\"));\n\t\t\t\tWordLexeme lex = new WordLexeme(lemma,cat);\n\t\t\t\tlexemes.add(lex);\n\t\t\t}\n\t\t\tContiguousLexeme rl = null;\n\t\t\tif (lexemes.size() == 1) {\n\t\t\t\trl = lexemes.get(0);\n\t\t\t} else {\n\t\t\t\trl = new MultiWordLexeme(lexemes);\n\t\t\t}\t\t\n\t\t\tIndicator ind = new Indicator(els[0],Arrays.asList(rl),false,Arrays.asList(new Sense(els[1])));\n\t\t\tallIndicators.add(ind);\n\t\t}\n\t\tfor (Indicator a: allIndicators) {\n\t\t\tlog.fine(\"Loaded trigger:\" + a.toString());\n\t\t}\n\t\treturn allIndicators;\n\t}\n\n\n\t/** \n\t * Processes all citation mentions in a document.\n\t * \n\t * @param doc Document to process\n\t * @return\ta String representation of the results (one line per citation mention)\n\t */\n\tpublic String processMentions(Document doc) {\n\t\tLinkedHashSet<SemanticItem> cms = Document.getSemanticItemsByClass(doc, CitationMention.class);\n\t\tStringBuilder buf = new StringBuilder();\n\t\tfor (SemanticItem s: cms) {\n\t\t\tCitationMention cm = (CitationMention)s;\n\t\t\tString str = processMention(cm);\n\t\t\tbuf.append(str); buf.append(\"\\n\");\n\t\t}\n\t\treturn buf.toString();\n\t}\n\n\t/**\n\t * Processes a single citation mention.\n\t * \n\t * @param mention\tCitation mention to process\n\t * @return\ta single line String representation of the result (<code>DocID|MentionID|MentionText|Prediction|Score|Triggers</code>)\n\t */\n\tpublic String processMention(CitationMention mention) {\n\t\tMap<String,Double> scoreMap = calculateScoreMap(mention);\n\t\tdouble cumScore = 0.0;\n\t\tfor (String c: scoreMap.keySet()) {\n\t\t\tcumScore += scoreMap.get(c);\n\t\t}\n\t\tString predict = predict(scoreMap);\n\t\tStringBuffer scoreBuf = new StringBuffer();\n\t\tfor (String t: scoreMap.keySet()) {\n\t\t\tscoreBuf.append(t + \"(\" + scoreMap.get(t) + \"),\");\n\t\t}\n\t\tString scoreStr = scoreBuf.toString();\n\t\treturn (mention.getDocument().getId() + \"|\" + mention.getId() + \"|\" + mention.getText() + \"|\" + predict + \"|\" + 
cumScore + \"|\" + \n\t\t\t\t(scoreStr.equals(\"\") ? \"\" : scoreStr.substring(0,scoreStr.length()-1)));\n\t}\n\n\t/**\n\t * Reads and annotates a single text file. The file is expected to consists of one sentence per line.\n\t * Each citation mention should be surrounded by \"cit\" tags. (e.g. <pre>{@code <cit id=\"C1\">[1-5]</cit>}</pre>)\n\t * \n\t * @param filename\tThe filename to process\n\t * @param props\tProperties to use in annotation\n\t * @return a Document object with all the relevant terms and citation mentions annotated, null if the processing fails\n\t */\n\tpublic Document preProcessTextFile(String filename,Properties props) {\n\t\tDocument doc = null;\n\t\ttry {\n\t\t\tString text = FileUtils.stringFromFile(filename, \"UTF-8\");\n\t\t\tdoc = preProcessString(filename,text,props);\n\n\t\t} catch (IOException ie) {\n\t\t\tlog.severe(\"Unable to process input file \" + filename);\n\t\t\tie.printStackTrace();\n\t\t\tSystem.exit(1);\n\t\t} \n\t\treturn doc;\n\t}\n\t\n\t\n\tpublic static void coreNLP(Sentence sent) {\n\t\tif (sent.getText().trim().equals(\"\")) {\n\t\t\treturn;\n\t\t}\n\t\tAnnotation annotation = CoreNLPWrapper.coreNLP(sent.getText(), true);\n\t List<CoreMap> sentenceAnns = annotation.get(SentencesAnnotation.class); \n\t if (sentenceAnns == null || sentenceAnns.size() == 0) {\n\t \tlog.warning(\"No sentence annotations were generated. Skipping coreNLP..\");\n\t \treturn;\n\t }\n\t List<Word> words = new ArrayList<>();\n//\t List<SynDependency> depList = new ArrayList<>();\n\t for (int i=0; i < sentenceAnns.size(); i++) {\n\t if (sentenceAnns.size() == 1) {\n\t \tCoreMap sentAnn = sentenceAnns.get(0);\n\t \twords = CoreNLPWrapper.getSentenceWords(sentAnn,sent.getSpan().getBegin());\n\t \tsent.setWords(words);\n\t \tfor (Word w : words) w.setSentence(sent);\n//\t \tsent.setTree(getSentenceTree(sentAnn));\n//\t \tdepList = getSentenceDependencies(sentAnn,words);\n//\t \tsent.setDependencyList(depList); \n\t \tsent.setSurfaceElements(new ArrayList<SurfaceElement>(words));\n//\t \tsent.setEmbeddings(new ArrayList<SynDependency>(depList));\n\t } \n\t }\n\t}\n\t\n\tpublic Document preProcessString(String id, String input,Properties props) {\n\t\tDocument doc = null;\n\t\ttry {\n\t\t\tSentenceSegmenter segmenter = ComponentLoader.getSentenceSegmenter(props);\n\t\t\tCoreNLPWrapper.getInstance(props);\n\t\t\tLinkedHashMap<String,SpanList> citSpans = identifyCitationSpans(input);\n//\t\t\tString textOnly =input.replaceAll(\"<(.+?)>\", \"\").trim();\n\t\t\tString textOnly =input.replaceAll(\"<cit(.+?)>\", \"\").replaceAll(\"</cit>\",\"\").trim();\n\t\t\tdoc = new Document(id,textOnly);\n\t\t\tCitationFactory sf = new CitationFactory(doc,new HashMap<>());\n\t\t\tdoc.setSemanticItemFactory(sf);\n\t\t\tList<Sentence> sentences = new ArrayList<>();\n\t\t\tsegmenter.segment(doc.getText(), sentences);\n\t\t\tdoc.setSentences(sentences);\n\t\t\tfor (int i=0; i < doc.getSentences().size(); i++) {\n\t\t\t\tSentence sent = doc.getSentences().get(i);\n\t\t\t\tsent.setDocument(doc);\n\t\t\t\t// create word list, pos, lemma info\n\t\t\t\tcoreNLP(sent);\t\n\t\t\t}\n\n\t\t\tint i= 0;\n\t\t\tfor (String st: citSpans.keySet()) {\n\t\t\t\tSpanList sp = citSpans.get(st);\n\t\t\t\tCitationMention m = sf.newCitationMention(doc, \"CitationMention\", sp, sp, doc.getText().substring(sp.getBegin(), sp.getEnd()));\n\t\t\t\tm.setContext(Arrays.asList(doc.getSubsumingSentence(sp.asSingleSpan()).getSpan()));\n\t\t\t\ti++;\n\t\t\t\tm.setId(st);\n\t\t\t}\n\t\t\tannotateTerms(doc,props);\n\t\t} catch 
(Exception e) {\n\t\t\t\tlog.warning(\"Unable to segment sentences.\");\n\t\t\t\te.printStackTrace();\n\t\t\t}\n\t\treturn doc;\n\t}\n\n\tprivate LinkedHashMap<String,SpanList> identifyCitationSpans(String text) {\n\t\tLinkedHashMap<String,SpanList> spans = new LinkedHashMap<>();\n\t\tint ind= 0;\n\t\tStringBuilder buf = new StringBuilder();\n\t\tMatcher m = CIT_PATTERN.matcher(text);\n\t\twhile (m.find()) {\n\t\t\tString pre = text.substring(ind,m.start());\n\t\t\tint citBeg = buf.toString().length() + pre.length();\n\t\t\tString citId = m.group(1);\n\t\t\tbuf.append(pre);\n\t\t\tbuf.append(m.group(2));\n\t\t\tSpanList sp = new SpanList(citBeg,citBeg + m.group(2).length());\n\t\t\tind = m.end();\n\t\t\tspans.put(citId, sp);\n\t\t}\n\t\treturn spans;\n\t}\n\n\t/**\n\t * Annotates a document with sentiment-related terms.\n\t * \n\t * @param doc\tThe document to annotate\n\t * @param properties\t\tProperties relevant for annotation\n\t */\n\tpublic void annotateTerms(Document doc, Properties properties) {\n\t\tUtils.annotateIndicators(doc,sentimentTerms,properties);\n\t\tUtils.annotateStrings(doc, negTerms, properties);\n\t\tUtils.removeSubsumedTerms(doc);\n\t}\n\n\t/**\n\t * Computes a score map for a citation mention. \n\t * \n\t * @param mention\tThe citation mention \n\t * @return\ta map where keys are sentiment terms found and the values are scores associated with them (taking negation into account).\n\t */\n\tpublic Map<String,Double> calculateScoreMap(CitationMention mention) {\n\t\tList<Span> context = mention.getContext();\n\t\tDocument doc = mention.getDocument();\n\t\t/*\t LinkedHashSet<SemanticItem> preds = Document.getSemanticItemsByClass(doc, Predicate.class);\n\t if (preds.size() == 0) {\n\t\t Utils.annotateIndicators(doc,sentimentTerms,properties);\n\t }*/\n\t\tSurfaceElement surf = mention.getSurfaceElement();\n\t\tint contextSize = 0;\n\t\tint index = 0;\n\t\tfor (Span sp : context) {\n\t\t\tList<SurfaceElement> surfs = doc.getSurfaceElementsInSpan(sp);\n\t\t\tif (surfs.contains(mention.getSurfaceElement())) {\n\t\t\t\tcontextSize = surfs.size();\n\t\t\t\tindex = context.indexOf(sp);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tMap<String,Double> scoreMap = new HashMap<>();\n\t\tfor (Span sp: context) {\n\t\t\tLinkedHashSet<SemanticItem> sems = Document.getSemanticItemsByClassSpan(doc, Term.class, new SpanList(sp), false);\n\t\t\tfor (SemanticItem sem : sems) {\n\t\t\t\tif (sem.getType().equals(\"NEGATION\")) continue;\n\t\t\t\tif (sem instanceof Entity || sem instanceof CitationMention) continue;\n\t\t\t\tPredicate pred = (Predicate)sem;\n\t\t\t\tSurfaceElement prs = pred.getSurfaceElement();\n\t\t\t\tSurfaceElement prev =prs.getSentence().getPrecedingSurfaceElement(prs);\n\t\t\t\tif (prev == null && pred.getType().equals(\"discourse\")) continue;\n\t\t\t\tIndicator ind = pred.getIndicator();\n\t\t\t\tString key = ind.getLexeme().toString().replaceAll(\"_\", \" \");\n\t\t\t\tdouble score = sentimentScores.get(key);\n\t\t\t\tint intrv = contextSize;\n\t\t\t\tif (context.indexOf(sp) == index) \n\t\t\t\t\tintrv = doc.getInterveningSurfaceElements(prs, surf).size();\n\t\t\t\tdouble factor = (double)Math.log(2)/Math.log(intrv+2);\n\t\t\t\tboolean neg = false;\n\t\t\t\tif (prev != null) {\n\t\t\t\t\tLinkedHashSet<SemanticItem> negs =prev.filterByEntities();\n\t\t\t\t\tfor (SemanticItem n: negs) {\n\t\t\t\t\t\tif (n.getType().equals(\"NEGATION\")) {\n\t\t\t\t\t\t\tneg = true;\n\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} \n\t\t\t\tif (!neg) {\n\t\t\t\t\tSurfaceElement 
prevprev =prs.getSentence().getPrecedingSurfaceElement(prev);\n\t\t\t\t\tif (prevprev != null) {\n\t\t\t\t\t\tLinkedHashSet<SemanticItem> negs =prevprev.filterByEntities();\n\t\t\t\t\t\tfor (SemanticItem n: negs) {\n\t\t\t\t\t\t\tif (n.getType().equals(\"NEGATION\")) {\n\t\t\t\t\t\t\t\tneg = true;\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t} \n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (score > 0) {\n\t\t\t\t\tif (!neg) scoreMap.put(key, score);\n\t\t\t\t\telse scoreMap.put(key, -score);\n\t\t\t\t} else {\n\t\t\t\t\tif (!neg) scoreMap.put(key, score*factor);\n\t\t\t\t\telse scoreMap.put(key, -score*factor);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn scoreMap;\n\t}\n\n\t/**\n\t * Returns the prediction based on the computed score map.\n\t * \n\t * @param map\tThe score map \n\t * @return \"NEUTRAL\", \"NEGATIVE\" or \"POSITIVE\"\n\t */\n\tpublic String predict(Map<String,Double> map) {\n\t\tdouble cumScore = 0.0;\n\t\tfor (String c: map.keySet()) {\n\t\t\tcumScore += map.get(c);\n\t\t}\n\t\tString predict = \"NEUTRAL\";\n\t\tif (cumScore > 1) predict = \"POSITIVE\";\n\t\telse if (cumScore <-0.1) predict = \"NEGATIVE\";\n\t\treturn predict;\n\t}\n\n\t/**\n\t * Command line (intended entry).\n\t */\n\tpublic static void main(String[] argv) throws Exception {\n\t\tProperties props = FileUtils.loadPropertiesFromFile(\"citation.properties\");\n\t\tString infile = argv[0];\n\t\tString outfile = argv[1];\n\t\tRuleBasedSentiment instance = RuleBasedSentiment.getInstance(props);\n\n\t\tDocument doc = instance.preProcessTextFile(infile,props);\n\t\tif (doc != null) {\n\t\t\tString out = instance.processMentions(doc);\n\t\t\tSystem.out.println(out);\n\t\t\tPrintWriter pw = new PrintWriter(outfile);\n\t\t\tpw.println(out);\n\t\t\tpw.flush();\n\t\t\tpw.close();\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.7587412595748901, "alphanum_fraction": 0.7657342553138733, "avg_line_length": 20.923076629638672, "blob_id": "ceb9d2797a476a6401b7a28f108da8bcc5d62550", "content_id": "b95e3dde84d88a132bee6063719201fb5804ab0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 286, "license_type": "no_license", "max_line_length": 110, "num_lines": 13, "path": "/scripts/ruleBasedPrediction.sh", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nexport CLASSPATH=\"\"\nfor file in `ls dist`\ndo\n export CLASSPATH=$CLASSPATH:dist/$file\ndone\nfor file in `ls lib`\ndo\n export CLASSPATH=$CLASSPATH:lib/$file\ndone\n\njava -Djava.util.logging.config.file=logging.properties gov.nih.nlm.citationsentiment.RuleBasedSentiment $1 $2\n\n" }, { "alpha_fraction": 0.6716398596763611, "alphanum_fraction": 0.6784566044807434, "avg_line_length": 34.82949447631836, "blob_id": "1b5f60d73635235f1383e8fef7211d1e9c92d418", "content_id": "ccbc43bdd6e0bfdec8ff3e8bb69940b89277d188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 7775, "license_type": "no_license", "max_line_length": 114, "num_lines": 217, "path": "/src/gov/nih/nlm/citationsentiment/RuleBasedSentimentEval.java", "repo_name": "xl60-hust/clinical-citation-sentiment", "src_encoding": "UTF-8", "text": "package gov.nih.nlm.citationsentiment;\n\nimport java.io.File;\nimport java.io.IOException;\nimport java.io.PrintWriter;\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.HashMap;\nimport java.util.HashSet;\nimport java.util.LinkedHashSet;\nimport java.util.List;\nimport java.util.Map;\nimport 
java.util.Properties;\nimport java.util.Set;\nimport java.util.logging.Logger;\n\nimport gov.nih.nlm.ling.core.Document;\nimport gov.nih.nlm.ling.core.SurfaceElement;\nimport gov.nih.nlm.ling.io.XMLPredicateReader;\nimport gov.nih.nlm.ling.io.XMLReader;\nimport gov.nih.nlm.ling.sem.Predicate;\nimport gov.nih.nlm.ling.sem.SemanticItem;\nimport gov.nih.nlm.ling.util.FileUtils;\n\n/**\n * Class to evaluate the rule-based sentiment citation analysis program. \n * \n * @author Halil Kilicoglu\n *\n */\npublic class RuleBasedSentimentEval {\n\tprivate static Logger log = Logger.getLogger(RuleBasedSentimentEval.class.getName());\t\n\n\tprivate RuleBasedSentiment instance; \n\n\tprivate static Map<Class<? extends SemanticItem>,List<String>> annTypes;\n\tprivate static XMLReader xmlReader;\n\n\tprivate static Map<String,List<CitationMention>> annoTP = new HashMap<>();\n\tprivate static Map<String,List<CitationMention>> annoFP = new HashMap<>();\n\tprivate static Map<String,List<CitationMention>> annoFN = new HashMap<>();\n\n\n\tpublic RuleBasedSentimentEval(Properties props) throws Exception {\n\t\txmlReader = getXMLReader();\n\t\tannTypes = Utils.getAnnotationTypes();\n\t\tinstance = RuleBasedSentiment.getInstance(props);\n\t}\n\n\tprivate XMLReader getXMLReader() {\n\t\tXMLReader reader = Utils.getXMLReader();\n\t\treader.addAnnotationReader(Predicate.class, new XMLPredicateReader());\n\t\treturn reader;\n\t}\n\n\t/**\n\t * Processes all corpus XML files and evaluates the program results.\n\t * \n\t * @param dir\tThe corpus directory\n\t * @param props\n\t * @param pw\tThe PrintWriter object associated with the output file\n\t */\n\tpublic void processDir(String dir,Properties props, PrintWriter pw) {\n\t\tList<CitationMention> instances = new ArrayList<>();\n\t\ttry {\n\t\t\tList<String> files = FileUtils.listFiles(dir, false, \"xml\");\n\t\t\tCollections.sort(files);\n\t\t\tint fileNum = 0;\n\t\t\tfor (String filename: files) {\n\t\t\t\tString filenameNoExt = filename.replace(\".xml\", \"\");\n\t\t\t\tfilenameNoExt = filenameNoExt.substring(filenameNoExt.lastIndexOf(File.separator)+1);\n\t\t\t\tlog.info(\"Processing \" + filenameNoExt + \":\" + ++fileNum);\n\t\t\t\tList<CitationMention> fileInstances = preProcessFile(filename,props);\n\t\t\t\tfor (CitationMention ins: fileInstances) {\n\t\t\t\t\tString gold = ins.getMetaData(\"goldSentiment\");\n\t\t\t\t\tString[] scoreStrs = instance.processMention(ins).split(\"[\\\\|]\");\n\n\t\t\t\t\tString predict = scoreStrs[3];\n\t\t\t\t\tString cumScore = scoreStrs[4];\n\t\t\t\t\tString scoreStr = \"\";\n\t\t\t\t\tif (scoreStrs.length > 5)\n\t\t\t\t\t\tscoreStr = scoreStrs[5];\n\n\t\t\t\t\tpw.println(predict + \"|\" + gold + \"|\" + cumScore + \"|\" + scoreStr + \"|\" + ins.toString());\n\t\t\t\t\tif (predict.equals(gold)) {\n\t\t\t\t\t\tList<CitationMention> tps = annoTP.get(predict);\n\t\t\t\t\t\tif (tps == null) tps = new ArrayList<CitationMention>();\n\t\t\t\t\t\ttps.add(ins);\n\t\t\t\t\t\tannoTP.put(predict, tps);\n\t\t\t\t\t} else {\n\t\t\t\t\t\tList<CitationMention> fps = annoFP.get(predict);\n\t\t\t\t\t\tif (fps == null) fps = new ArrayList<CitationMention>();\n\t\t\t\t\t\tfps.add(ins);\n\t\t\t\t\t\tannoFP.put(predict, fps);\n\t\t\t\t\t\tList<CitationMention> fns = annoFN.get(gold);\n\t\t\t\t\t\tif (fns == null) fns = new ArrayList<CitationMention>();\n\t\t\t\t\t\tfns.add(ins);\n\t\t\t\t\t\tannoFN.put(gold, fns);\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tinstances.addAll(fileInstances);\n\t\t\t}\n\t\t} catch (IOException ie) {\n\t\t\tlog.severe(\"Unable 
to read input files from \" + dir);\n\t\t}\n\t}\n\n\t/**\n\t * Reads a file from XML and preprocesses it for gold labels and sentiment terms. \n\t * \n\t * @param filename\tThe file to process\n\t * @param props\n\t * @return The list of citation mentions from the file\n\t */\n\tpublic List<CitationMention> preProcessFile(String filename,Properties props) {\n\t\tDocument doc = null;\n\t\tdoc = xmlReader.load(filename, true,CitationFactory.class, annTypes, null);\n\t\tLinkedHashSet<SemanticItem> cms = Document.getSemanticItemsByClass(doc, CitationMention.class);\n\t\tMap<String,String> gold = new HashMap<String,String>();\n\t\tfor (SemanticItem s: cms) {\n\t\t\tif (s instanceof CitationMention == false) continue;\n\t\t\tCitationMention cm = (CitationMention)s;\n\t\t\tString sentiment = cm.getSentiment().toString();\n\t\t\tgold.put(cm.getId(), sentiment);\n\t\t}\n\t\treturn preProcessDocument(doc,gold,props);\n\t}\n\n\tprivate List<CitationMention> preProcessDocument(Document doc,Map<String,String> gold, Properties properties) {\n\t\tinstance.annotateTerms(doc, properties);\n\t\tList<CitationMention> citationMentions = new ArrayList<>();\n\t\tSet<SurfaceElement> seen = new HashSet<SurfaceElement>();\n\t\tLinkedHashSet<SemanticItem> mentions = Document.getSemanticItemsByClass(doc, CitationMention.class);\n\t\tfor (SemanticItem m: mentions) {\n\t\t\tif (m instanceof CitationMention == false) continue;\n\t\t\tif (m.getId().startsWith(\"C\") == false) continue;\n\t\t\tCitationMention cm = (CitationMention)m;\n\t\t\tSurfaceElement surf = cm.getSurfaceElement();\n\t\t\tif (seen.contains(surf)) continue;\n\t\t\tString goldSentiment = gold.get(cm.getId());\n\t\t\tif (goldSentiment.equals(\"NONE\")) continue;\n\t\t\t// current sentence only\n\t\t\t//\t\t if (cm.getContext() == null)\n\t\t\tcm.setContext(Utils.getCitationContext(cm,true)); \n\t\t\tcm.setMetaData(\"goldSentiment\", goldSentiment);\n\t\t\tcitationMentions.add(cm);\n\t\t\tseen.add(surf);\n\t\t}\n\t\treturn citationMentions;\n\t}\n\n\t/** \n\t * Compute evaluation metrics and write the output file.\n\t * \n\t * @param pw A PrintWriter object associated with the output file\n\t */\n\tpublic void writeEvaluation(PrintWriter pw) {\n\t\tint gTP = 0, gFP = 0;\n\t\tdouble gF1 = 0;\n\t\tfor (CitationMention.Sentiment sent: CitationMention.Sentiment.values()){\n\t\t\tif (sent == CitationMention.Sentiment.NONE) continue;\n\t\t\tString a = sent.toString();\n\t\t\tint TP = (annoTP.get(a) == null ? 0 : annoTP.get(a).size());\n\t\t\tint FP = (annoFP.get(a) == null ? 0 : annoFP.get(a).size());\n\t\t\tint FN = (annoFN.get(a) == null ? 
0 : annoFN.get(a).size());\n\t\t\tgTP += TP;\n\t\t\tgFP += FP;\n\t\t\tdouble precision = 0;\n\t\t\tdouble recall = 0;\n\t\t\tdouble f_measure = 0;\n\t\t\tif (TP+FP > 0) { precision = (double)TP/(TP+FP); }\n\t\t\tif (TP+FN > 0) { recall = (double)TP/(TP+FN); }\n\t\t\tif ((precision+recall) > 0) { \n\t\t\t\tf_measure = (2*precision*recall)/(double)(precision+recall); \n\t\t\t}\n\t\t\tgF1 += f_measure;\n\n\t\t\tpw.write(a + \"\\t\" + (TP +FP) + \"(\" + TP + \")\" + \"\\t\" + (TP + FN) + \"(\" + TP + \")\"\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", precision)\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", recall)\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", f_measure)); \t\t\n\t\t\tpw.write(\"\\n\");\n\n\t\t\tSystem.out.println(a + \"\\t\" + (TP +FP) + \"(\" + TP + \")\" + \"\\t\" + (TP + FN) + \"(\" + TP + \")\"\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", precision)\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", recall)\n\t\t\t\t\t+ \"\\t\" + String.format(\"%1.4f\", f_measure)); \t\t\n\t\t}\n\n\t\tdouble accuracy = 0;\n\t\tdouble macrof1 = 0;\n\n\t\tif (gTP+gFP > 0) { accuracy = (double)gTP/(gTP+gFP); }\n\t\tmacrof1 = (double)gF1/3; \n\n\n\t\tpw.write(\"Accuracy: \" + String.format(\"%1.4f\", accuracy)); pw.write(\"\\n\");\n\t\tpw.write(\"Macro-F1: \" + String.format(\"%1.4f\", macrof1)); pw.write(\"\\n\");\n\t\tpw.write(\"\\n\");\n\n\t\tSystem.out.println(\"Accuracy: \" + String.format(\"%1.4f\", accuracy)); \n\t\tSystem.out.println(\"Macro-F1: \" + String.format(\"%1.4f\", macrof1)); \n\t}\n\n\tpublic static void main(String[] argv) throws Exception {\n\t\tProperties props = FileUtils.loadPropertiesFromFile(\"citation.properties\");\n\t\tString outfile = argv[0];\n\t\tRuleBasedSentimentEval eval = new RuleBasedSentimentEval(props);\n\t\tString trainDir = props.getProperty(\"sentimentTrainDirectory\");\n\t\tString testDir = props.getProperty(\"sentimentAllDirectory\");\n\t\tPrintWriter pw = new PrintWriter(outfile);\n\t\teval.processDir(testDir,props,pw);\n\t\teval.writeEvaluation(pw);\n\t\tpw.flush();\n\t\tpw.close();\n\t}\n}\n" } ]
14
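The prediction step in the `RuleBasedSentiment` code above (its `predict()` method) reduces to a cumulative-score threshold over the trigger scores gathered from a citation's context: sum the per-trigger scores (already sign-flipped for negation and distance-damped), then label the citation POSITIVE above 1.0, NEGATIVE below -0.1, and NEUTRAL otherwise. The following minimal, self-contained sketch mirrors only that thresholding rule; the class name, example trigger strings, and score values are illustrative assumptions and do not appear in the repository.

```java
import java.util.HashMap;
import java.util.Map;

// Hypothetical standalone sketch of the cumulative-score threshold used for
// citation sentiment prediction; it is not part of the repository above.
public class SentimentThresholdSketch {

    /** Returns "NEUTRAL", "NEGATIVE" or "POSITIVE" for a map of trigger scores. */
    static String predict(Map<String, Double> scoreMap) {
        double cumScore = 0.0;
        for (double s : scoreMap.values()) {
            cumScore += s;
        }
        if (cumScore > 1) return "POSITIVE";
        if (cumScore < -0.1) return "NEGATIVE";
        return "NEUTRAL";
    }

    public static void main(String[] args) {
        Map<String, Double> scores = new HashMap<>();
        scores.put("effective(adj)", 1.2);   // example positive trigger score
        scores.put("fail(verb)", -0.8);      // example negative trigger score (already damped)
        System.out.println(predict(scores)); // prints NEUTRAL (1.2 - 0.8 = 0.4)
    }
}
```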
mukoko99/unfold-wardrobe-manager
https://github.com/mukoko99/unfold-wardrobe-manager
060bddcff2df0b6e67738e5e9f0bd04b8fe7949a
142040a774d60b7883d97e0c0af226e730ff2b5b
379e70eb2d002e0bd605a75444f7b0c37add05c7
refs/heads/master
2023-05-22T18:08:25.433746
2020-03-06T17:27:01
2020-03-06T17:27:01
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7942005395889282, "alphanum_fraction": 0.7942005395889282, "avg_line_length": 56.72093200683594, "blob_id": "4d45f675a53577b3ae1315e33f3fee8680eb69f9", "content_id": "4d52dc87f9d7098876f70733cd98a1832aec86a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2483, "license_type": "no_license", "max_line_length": 741, "num_lines": 43, "path": "/README.md", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "# Unfold\n\nRediscover joy in your closet. Easily choose what to wear with personalized outfit recommendations that use event tags, weather, and user's wear logs.\n\nSee all your clothes in one place, catalog and combine clothing articles into outfits, and log outfits worn to see statistics and receive recommendations.\n\n![alt text](readme-images/login.png \"Login screenshot\")\n\n## Homepage\n\n![alt text](readme-images/homepage.png \"Homepage screenshot\")\n\nIf a user has logged an event for the day, it will appear on the homepage along with an outfit recommendation for each event (or the assigned outfit for each event.) Here you will also see a weather preview supplied by the DarkSky API for today and tomorrow, along with a carousel of user stats like total articles, best value outfit, and most popular tags. \n\n## Categories\n\n![alt text](readme-images/categories.png \"Categories screenshot\")\n\nCategories are user-created, but inherit from base categories like \"Tops\", \"Hats\", \"Full-Length\", and \"Outerwear\". \n\n![alt text](readme-images/category-detail.png \"Category detail screenshot\")\n\nSelecting an individual category shows all articles belonging to the category.\n\n## Articles\n\n![alt text](readme-images/articles.png \"Articles screenshot\")\n\nArticles are displayed by category, and categories are sorted by base category. Each thumbnail is a link to an article detail view.\n\n## Article Details\n\n![alt text](readme-images/article-detail.png \"Article detail screenshot\")\n\nThe article detail view shows the article description if provided, the image, purchase price, and tags. The user can update article details as well.\n\n## Article Creation\n\n![alt text](readme-images/article-create.png \"Create article screenshot\")\n\n# About the Developer\n\nAthelia changed careers to launch a career of creative problem solving, growth, and new challenges. Prior to becoming a SWE student, she worked for a building materials company in a variety of roles, from doing document design and copywriting in the Marketing department to producing the price list and pricing tools in the Sales & Bid Center. For the latter, she developed an automated tool to produce quotes on behalf of sales representatives, reducing errors both in calculation and data entry, and eliminating repetitive calculations. 
Athelia looks forward to using her experience with this project and her new skills gained through Hackbright to explore even more complex problems and create more tools that will improve people's lives.\n\n" }, { "alpha_fraction": 0.5838015079498291, "alphanum_fraction": 0.5871860980987549, "avg_line_length": 33.59836196899414, "blob_id": "3e22f2ede46ec9ab2ca820d628110777ffb53d39", "content_id": "07dec56e57f635410571972362c6b17b57e6c72b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 29546, "license_type": "no_license", "max_line_length": 89, "num_lines": 854, "path": "/server.py", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "\"\"\"Web app for wardrobe management\"\"\"\n\nfrom jinja2 import StrictUndefined\nfrom flask import Flask, render_template, redirect, request, flash, session, g\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom werkzeug.utils import secure_filename\n\nimport os\nimport requests\nfrom datetime import datetime, date, timedelta\nfrom sqlalchemy import asc, update, func\nimport random\n\n# import flask_restless\n# from flask_login import LoginManager\n\n# Import helper function, SQLAlchemy database, and model definitions\nfrom model import (connect_to_db, db, User, BaseCategory, Category, Article,\n Outfit, Tag, WearEvent, ArticleOutfit, TagArticle, TagOutfit, TagEvent)\n\n# Handles image upload and storage\nimport cloudinary\nfrom cloudinary.uploader import upload\nimport cloudinary.api\n\n#########################\n# REFACTOR ME\n# Import functions for image storage and processing\nfrom image_handling import allowed_file, ALLOWED_EXTENSIONS\n\n# CITIES' latitude and longitude\nfrom global_var import CITIES, MONTHS\n\n# Compare clothing prices\n# from etsy import Etsy\n\n# Get weather from OpenWeatherMap API\n# import pyowm\n\n# Get weather from DarkSky API\nfrom darksky import forecast\n\napp = Flask(__name__)\napp.config.from_pyfile('flaskconfig.cfg')\n\n# manager = flask_restless.APIManager(app)\n\n# Set Cloudinary API configuration from environmental variables\ncloudinary.config.update = ({\n 'cloud_name':os.environ.get('CLOUDINARY_CLOUD_NAME'),\n 'api_key': os.environ.get('CLOUDINARY_API_KEY'),\n 'api_secret': os.environ.get('CLOUDINARY_API_SECRET')\n })\n\n# Set Etsy API config from environmental variables\n# etsy_config = ({\n # 'api_key': os.environ.get('ETSY_API_KEY'),\n # 'api_secret': os.environ.get('ETSY_API_SECRET')\n # })\n# Manual assignment of API key\n# etsy_api = Etsy(etsy_config['api_key'])\n\n# Set OpenWeatherMap API key\n# owm = pyowm.OWM(os.environ.get('OPEN_WEATHER_API_KEY'))\n\n# Set DarkSky API key\ndark_sky = ({\n 'secret':os.environ.get('DARK_SKY_API_SECRET'),\n })\n\n# Normally, if you use an undefined variable in Jinja2, it fails\n# silently. This is horrible. 
Fix this so that, instead, it raises an\n# error.\napp.jinja_env.undefined = StrictUndefined\n\n# Flask-Login is WIP\n# # Flask-Login needs some setup\n# login_manager = LoginManager()\n# login_manager.init_app(app)\n\n\n# Flask-Login is WIP\n# AttributeError: type object 'User' has no attribute 'get'\n# @login_manager.user_loader\n# def load_user(user_id):\n# return User.query.filter(User.user_id == user_id).first()\n\n\ndef downsize_image(articles, width=300, height=300):\n \"\"\"Replaces instance attribute article.image with a downsized image url.\n\n Because it transforms the original input in place, function returns None.\n Uses Cloudinary's built-in transformations on the server side to render the\n new url as a smaller image.\n \"\"\"\n\n thumbnail_format = f'w_{width},h_{height},c_fill'\n for article in articles:\n split_url = article.image.split('/')\n split_url.insert(6, thumbnail_format)\n article.image = '/'.join(split_url)\n\n\n###############################################################################\n# #\n# BASIC ROUTES #\n# #\n###############################################################################\n\n\n# TODO: consistent single quotes in render_template template names\[email protected]('/')\ndef index():\n \"\"\"If logged in, display homepage to go to outfits, categories, or articles.\"\"\"\n \n if session.get('user_id', None):\n city = CITIES['SFO']\n # Dark Sky requires a date in isoformat\n weather = forecast.Forecast(dark_sky['secret'], city['lat'], city['lng'])\n hourly = weather.hourly\n daily = weather.daily\n\n # Hard-code UTC-8 because haven't implemented proper time zone \n # localization (yet, anyway)\n # TODO: User can set time zone in profile\n time_offset = - 8 * 60 * 60\n\n for hour in hourly:\n hour.datestr = datetime.utcfromtimestamp(hour['time'] + \n time_offset).strftime('%m/%d %H:%M')\n for day in daily:\n day.datestr = datetime.utcfromtimestamp(day['time'] + \n time_offset).strftime('%m/%d')\n\n events_today = filter_events_today()\n \n outfits = Outfit.query.filter(Outfit.user_id == session['user_id']).all()\n user = User.query.get(session['user_id'])\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n categories = sort_categories_by_base(categories)\n # TODO: Find a way to save results of this function so the crazy Tag \n # queries don't run every time user goes back to homepage\n # session['user_stats'] = user_stats\n user_stats = user.get_stats()\n random_category = user.categories[random.randint(0,len(user.categories)-1)] \\\n if user.categories else None\n random_category2 = user.categories[random.randint(0,len(user.categories)-1)] \\\n if user.categories else None\n random_tag = ['article', 'outfit', 'event'][random.randint(0,2)]\n \n # TODO: add the other outfit recs as options at subsequent indices\n # outfit_recs[event] = [top_pick, other option, different option...]\n outfit_recs = {}\n coat_count = None\n for event in events_today:\n filtered = event.filter_outfits_by_weather_and_recent()\n if filtered:\n if filtered['top_pick']: \n outfit_recs[event] = filtered['top_pick']\n else:\n outfit_recs[event] = filtered['all_picks'][-1]\n coat_count = event.recommend_coats()\n\n # TODO: May be irrelevant in the scheme of things, but this is quite\n # inefficient. 
Iterates over all articles in all outfits, rather than\n # only the ones we will need to display.\n for outfit in outfits:\n downsize_image(outfit.articles, width=500, height=500)\n\n return render_template(\"homepage.html\",\n hourly = hourly,\n daily = daily,\n events_today = events_today,\n outfit_recs = outfit_recs,\n outfits = outfits,\n user_stats = user_stats,\n random_category = random_category,\n random_category2 = random_category2,\n random_tag = random_tag,\n coat_count = coat_count or None\n )\n else:\n return render_template(\"login.html\")\n\n\ndef filter_events_today():\n \"\"\"Set time window for 'today' and return user's events within window.\"\"\"\n\n now = datetime.today()\n today_start = datetime(now.year, now.month, now.day, 0, 0, 0)\n today_end = datetime(now.year, now.month, now.day, 23, 59, 59)\n events = WearEvent.query.filter(WearEvent.user_id == session['user_id'])\n events_today = events.filter(today_start <= WearEvent.date)\n events_today = events_today.filter(WearEvent.date <= today_end)\n events_today = events_today.order_by(WearEvent.date).all()\n return events_today\n\n\[email protected]('/login')\ndef login():\n \"\"\"Check login credentials against database.\"\"\"\n\n # Cast to lowercase to match database.\n email = request.args.get('email').lower()\n password = request.args.get('password')\n\n # Using .first() to return a none-type if not valid\n user = User.query.filter(User.email == email, User.password == password).first()\n\n if user:\n # Add user's id and email to session\n session['user_id'] = user.user_id\n session['user_email'] = user.email\n # Flask-Login is WIP\n # login_user(user)\n user = User.query.get(session['user_id'])\n flash(f\"Welcome back, {session['user_email']}!\")\n return redirect('/')\n else:\n flash(\"Invalid email & password combination. 
Please try again.\")\n return redirect('/')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out of session.\"\"\"\n\n # Remove user's id and email from session\n del session['user_id']\n del session['user_email']\n # Flask-Login is WIP\n # login_user(user)\n flash(f\"Logged out successfully.\")\n return redirect('/')\n\n\n# WIP - This template does not exist!\[email protected]('/create-account')\ndef create_account_page():\n \"\"\"Display account creation form.\"\"\"\n\n return render_template(\"new-account.html\")\n\n\n# WIP\[email protected]('/profile')\ndef show_profile():\n \"\"\"Display logged-in user's profile.\"\"\"\n\n if session.get('user_id'):\n user = User.query.filter_by(user_id = session['user_id']).one()\n else:\n user = None\n\n return render_template('profile.html', user = user)\n\n\n###############################################################################\n# #\n# CATEGORIES ROUTES #\n# #\n###############################################################################\n\n\[email protected]('/categories')\ndef show_categories():\n \"\"\"Display all user categories and the option to add a new category.\"\"\"\n\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n base_categories = BaseCategory.query.all()\n\n return render_template(\"categories.html\", \n categories=categories,\n base_categories=base_categories)\n\n\[email protected]('/categories/<category_id>')\ndef show_category_articles(category_id):\n \"\"\"Display articles of clothing belonging to selected category.\"\"\"\n\n # TODO: Possible refactor is to save repetitive queries to a variable &\n # only execute inside the route\n articles = Article.query.filter(Article.category_id == category_id,\n Article.user_id == session['user_id']).all()\n category = Category.query.filter(Category.category_id == category_id,\n Category.user_id == session['user_id']).one()\n\n downsize_image(articles, width=300, height=300)\n\n return render_template(\"single-category.html\", \n articles=articles,\n category=category)\n\n\[email protected]('/add-category')\ndef show_create_category_form():\n \"\"\"Display form to create a new user category.\"\"\"\n\n base_categories = BaseCategory.query.all()\n\n return render_template(\"add-category.html\",\n base_categories=base_categories)\n\n\[email protected]('/create-category', methods=['POST'])\ndef add_category():\n \"\"\"Adds new user-created category and redirects to /categories.\"\"\"\n\n name = request.form.get('category-name')\n base_category = request.form.get('base-category')\n description = request.form.get('category-description')\n\n new_category = Category(user_id =session['user_id'],\n base_category_id =base_category,\n name =name,\n description =description)\n\n db.session.add(new_category)\n db.session.commit()\n\n flash(f\"Created {new_category.name} successfully\")\n\n return redirect('/categories')\n\n\n###############################################################################\n# #\n# ARTICLES ROUTES #\n# #\n###############################################################################\n\n\[email protected]('/articles')\ndef show_articles():\n \"\"\"Display all articles of clothing and the option to add a new article.\"\"\"\n\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n categories = sort_categories_by_base(categories)\n articles = Article.query.filter(Article.user_id == session['user_id']).all()\n\n downsize_image(articles, width=300, height=300)\n\n return 
render_template(\"articles.html\", \n articles=articles,\n categories=categories)\n\n\[email protected]('/add-article')\ndef show_create_article_form():\n \"\"\"Display form to create a new article of clothing.\"\"\"\n\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n tags = Tag.query.filter(Tag.user_id == session['user_id']).all()\n\n return render_template(\"add-article.html\",\n categories=categories,\n tags=tags)\n\n\[email protected]('/create-article', methods=['POST'])\ndef add_article():\n \"\"\"Adds new clothing article and redirects to the previous category page.\"\"\"\n\n category_id = request.form.get('category')\n description = request.form.get('article-description')\n file = request.files['article-image-upload']\n tag_ids = request.form.getlist('article-tags')\n new_tag_string = request.form.get('new-tags')\n purchase_price = request.form.get('purchase-price') \n\n category = Category.query.get(category_id)\n\n if not allowed_file(file.filename):\n flash(f'File extension .{file.filename.rsplit(\".\", 1)[1]} not allowed')\n if file and allowed_file(file.filename):\n \n # Sanitizes user input\n filename = secure_filename(file.filename)\n\n # Cloudinary upload function: 1) folders by user and category name, \n # 2) unique filename is true, \n # 3) use cloudinary's AI to remove background \n # ^ (commented out b/c paid service)\n upload_file = upload(file,\n folder = f\"user/{session['user_email']}/{category.name}\",\n unique_filename = 1,\n # background_removal = \"cloudinary_ai\",\n )\n\n # For purchase_price, an empty string not ok, but okay to pass None\n new_article = Article(user_id = session['user_id'],\n category_id = category_id,\n image = upload_file['secure_url'],\n description = description,\n purchase_price = purchase_price or None)\n\n all_tags = []\n for tag_id in tag_ids:\n all_tags.append(Tag.query.filter_by(tag_id = tag_id).one())\n\n # Any newly created tags should be added to this as well\n all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])\n\n # Then create all the tag relationships\n for tag in all_tags:\n new_article.add_tag(tag)\n\n db.session.add(new_article)\n db.session.commit()\n flash(f\"Created new item in {category.name}\")\n\n return redirect(f'/categories/{category_id}')\n\n\[email protected]('/delete-article', methods=['POST'])\ndef delete_article():\n \"\"\"Deletes an article.\"\"\"\n\n article_id = request.form.get('article-to-delete')\n article = Article.query.filter_by(article_id = article_id).one()\n\n article.delete()\n\n return redirect('/articles')\n\n\[email protected]('/articles/<article_id>')\ndef show_article_detail(article_id):\n \"\"\"Display specific article details.\"\"\"\n\n article = Article.query.filter_by(article_id = article_id).first()\n tags = Tag.query.filter(Tag.user_id == session['user_id']).all()\n\n return render_template(\"single-article.html\", \n article=article,\n tags=tags)\n\n\[email protected]('/update-article', methods=['POST'])\ndef update_article_details():\n \"\"\"Updates an article's details.\"\"\"\n\n new_price = request.form.get('purchase-price')\n article_id = request.form.get('article-to-edit')\n tag_ids = request.form.getlist('article-tags')\n new_tag_string = request.form.get('new-tags')\n article = Article.query.filter_by(article_id = article_id).one()\n\n if new_price:\n article.update({'purchase_price' : new_price})\n\n all_tags = []\n for tag_id in tag_ids:\n all_tags.append(Tag.query.filter_by(tag_id = tag_id).one())\n\n # TODO: Brute force 
method - remove all tags before appending\n # Better: Check for discrepancies; remove unchecked, then proceed\n\n # Any newly created tags should be added to this as well\n all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])\n\n # Then create all the tag relationships\n for tag in all_tags:\n article.add_tag(tag)\n\n return redirect(f'/articles/{article_id}')\n\n\n###############################################################################\n# #\n# OUTFITS ROUTES #\n# #\n###############################################################################\n\n\[email protected]('/outfits')\ndef show_outfits():\n \"\"\"Display all outfits and the option to add a new outfit.\"\"\"\n\n outfits = Outfit.query.filter(Outfit.user_id == session['user_id']).all()\n\n return render_template('outfits.html', outfits=outfits)\n\n\[email protected]('/add-outfit')\ndef show_create_outfit_form():\n \"\"\"Display form to create a new outfit.\"\"\"\n\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n tags = Tag.query.filter(Tag.user_id == session['user_id']).all()\n\n for category in categories:\n downsize_image(category.articles, width=300, height=300)\n\n return render_template('add-outfit.html', categories=categories, tags=tags)\n\n\[email protected]('/create-outfit', methods=['POST'])\ndef add_outfit():\n \"\"\"Adds new outfit and redirects to the previous outfits page.\"\"\"\n\n description = request.form.get('outfit-description')\n name = request.form.get('outfit-name')\n article_ids = request.form.getlist('articles-to-add')\n tag_ids = request.form.getlist('outfit-tags')\n new_tag_string = request.form.get('new-tags')\n\n # First create a new Outfit in the db\n outfit = Outfit(user_id=session['user_id'],\n description=description,\n name=name)\n db.session.add(outfit)\n\n # Then create all the article relationships\n for article_id in article_ids:\n article = Article.query.filter(Article.article_id == article_id).one()\n outfit.add_article(article)\n\n all_tags = []\n for tag_id in tag_ids:\n all_tags.append(Tag.query.filter_by(tag_id = tag_id).one())\n\n # Any newly created tags should be added to this as well\n all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])\n\n # Then create all the tag relationships\n for tag in all_tags:\n outfit.add_tag(tag)\n\n db.session.commit()\n\n text = name if name else description\n\n flash(f\"Created new outfit: {text}\")\n\n return redirect('/outfits')\n\n\[email protected]('/outfits/<outfit_id>')\ndef show_outfit_detail(outfit_id):\n \"\"\"Display specific outfit details.\"\"\"\n\n outfit = Outfit.query.filter_by(outfit_id = outfit_id).first()\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n categories = sort_categories_by_base(categories)\n tags = Tag.query.filter(Tag.user_id == session['user_id']).all()\n\n for category in categories:\n downsize_image(category.articles, width=300, height=300)\n\n return render_template('single-outfit.html',\n outfit=outfit,\n categories=categories,\n tags=tags)\n\n\n# TODO: this all feels quite inefficient - is there a better way?\ndef sort_categories_by_base(categories):\n \"\"\"Puts user categories in order by base category type.\"\"\"\n\n categories_2 = []\n\n for category in categories:\n if category.base_category_id == 'tops':\n categories_2.append(category)\n\n for category in categories:\n if category.base_category_id == 'bottoms':\n categories_2.append(category)\n\n for category in categories:\n if 
category.base_category_id == 'fulls':\n categories_2.append(category)\n\n for category in categories:\n if category.base_category_id == 'outers':\n categories_2.append(category)\n\n for category in categories:\n if category.base_category_id == 'shoes':\n categories_2.append(category)\n elif category.base_category_id == 'hats':\n categories_2.append(category)\n elif category.base_category_id == 'access':\n categories_2.append(category)\n elif category.base_category_id == 'jewels':\n categories_2.append(category)\n elif category.base_category_id == 'others':\n categories_2.append(category)\n\n return categories_2\n\n\[email protected]('/update-outfit', methods=['POST'])\ndef update_outfit_details():\n \"\"\"Updates an outfit's details.\"\"\"\n\n outfit_id = request.form.get('outfit-to-edit')\n new_tag_string = request.form.get('new-tags')\n tag_ids = request.form.getlist('outfit-tags')\n outfit = Outfit.query.filter_by(outfit_id = outfit_id).one()\n\n all_tags = []\n for tag_id in tag_ids:\n all_tags.append(Tag.query.filter_by(tag_id = tag_id).one())\n\n # Any newly created tags should be added to this as well\n all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])\n\n # Then create all the tag relationships\n for tag in all_tags:\n outfit.add_tag(tag)\n\n return redirect(f'/outfits/{outfit_id}')\n\n\[email protected]('/delete-outfit', methods=['POST'])\ndef delete_outfit():\n \"\"\"Deletes an outfit.\"\"\"\n\n outfit_id = request.form.get('outfit-to-delete')\n outfit = Outfit.query.filter_by(outfit_id = outfit_id).one()\n\n outfit.delete()\n\n return redirect('/outfits')\n\n\[email protected]('/add-article/<outfit_id>/<article_id>')\ndef add_article_to_outfit(outfit_id, article_id):\n \"\"\"Add article to outfit and update the page.\"\"\"\n \n outfit = Outfit.query.filter(Outfit.outfit_id == outfit_id).one()\n article = Article.query.filter(Article.article_id == article_id).one()\n\n outfit.add_article(article)\n categories = Category.query.filter(Category.user_id == session['user_id']).all()\n\n return redirect(f'/outfits/{outfit_id}')\n\n\[email protected]('/remove-article/<outfit_id>/<article_id>')\ndef remove_article_from_outfit(outfit_id, article_id):\n \"\"\"Remove article from outfit and update the page.\"\"\"\n\n outfit = Outfit.query.filter(Outfit.outfit_id == outfit_id).one()\n article = Article.query.filter(Article.article_id == article_id).one()\n\n outfit.remove_article(article)\n\n return redirect(f'/outfits/{outfit_id}')\n\n\n###############################################################################\n# #\n# EVENTS ROUTES #\n# #\n###############################################################################\n\n\[email protected]('/events')\ndef show_events():\n \"\"\"Display all events and the option to add a new event.\"\"\"\n\n evt_by_month = {}\n # events = WearEvent.query.filter(WearEvent.user_id == \n # session['user_id']).order_by(WearEvent.date.desc()).all()\n events = WearEvent.query.filter(WearEvent.user_id == \n session['user_id']).order_by(WearEvent.date).all()\n for event in events:\n month = event.date.month\n year = event.date.year\n evt_by_month[year] = evt_by_month.get(year, {})\n evt_by_month[year][month] = evt_by_month[year].get(month, [])\n evt_by_month[year][month].append(event)\n\n return render_template('events.html', evt_by_month=evt_by_month, MONTHS=MONTHS)\n\n\[email protected]('/add-event')\ndef show_create_event_form():\n \"\"\"Display form to create a new wear event/clothing log.\"\"\"\n\n outfits = 
Outfit.query.filter(Outfit.user_id == session['user_id']).all()\n tags = Tag.query.filter(Tag.user_id == session['user_id']).all()\n\n return render_template('add-event.html',\n CITIES=CITIES,\n outfits=outfits,\n tags=tags)\n\n\[email protected]('/create-event', methods=['POST'])\ndef add_event():\n \"\"\"Adds new event and redirects to the previous events page.\"\"\"\n \n # String unpacking to pass as arguments to datetime\n year, month, day = request.form.get('event-date').split('-')\n time = request.form.get('event-time')\n city = request.form.get('city')\n description = request.form.get('event-description')\n name = request.form.get('event-name')\n outfit_id = request.form.get('event-outfit')\n tag_ids = request.form.getlist('event-tags')\n new_tag_string = request.form.get('new-tags')\n\n if time:\n hour, minute = time.split(':')\n date_time = datetime(int(year), int(month), int(day), int(hour), int(minute))\n else:\n date_time = datetime(int(year), int(month), int(day), int(10))\n\n # First create a new Event in the db\n event = WearEvent(user_id=session['user_id'],\n outfit_id=outfit_id or None,\n description=description or None,\n name=name or f'{month}-{day}-{year}',\n date=date_time)\n\n # If location is provided, get weather\n if city:\n event.set_weather(CITIES[city]['lat'], CITIES[city]['lng'])\n\n if outfit_id:\n outfit = Outfit.query.filter_by(outfit_id = outfit_id).one()\n outfit.incr_times_worn()\n\n all_tags = []\n for tag_id in tag_ids:\n all_tags.append(Tag.query.filter_by(tag_id = tag_id).one())\n\n # Any newly created tags should be added to this as well\n all_tags += Tag.parse_str_to_tag(new_tag_string, session['user_id'])\n\n # Then create all the tag relationships\n for tag in all_tags:\n event.add_tag(tag)\n\n db.session.add(event)\n db.session.commit()\n\n text = name if name else description\n\n flash(f\"Created new event: {text}\")\n\n return redirect('/events')\n\n\[email protected]('/update-event', methods=['POST'])\ndef update_event_details():\n \"\"\"Update an event's details.\"\"\"\n \n event_id = request.form.get('event-to-edit')\n event = WearEvent.query.filter_by(wear_event_id = event_id).one()\n outfit_id = request.form.get('event-outfit')\n\n options = {}\n\n # TODO: this feels yucky\n name = request.form.get('update-name')\n description = request.form.get('update-description')\n tags = request.form.getlist('update-tags')\n if name:\n options['name'] = name\n if description:\n options['description'] = description\n for tag_id in tags:\n tag = Tag.query.filter_by(tag_id = tag_id).one()\n event.add_tag(tag)\n if outfit_id:\n options['outfit_id'] = outfit_id\n outfit = Outfit.query.filter_by(outfit_id = outfit_id).one()\n outfit.incr_times_worn()\n\n event.update(options)\n db.session.commit()\n\n return redirect(f'/events/{event_id}')\n\n\[email protected]('/events/<wear_event_id>')\ndef show_event_details(wear_event_id):\n \"\"\"Display specific event details.\"\"\"\n\n event = WearEvent.query.filter_by(wear_event_id = wear_event_id).first()\n tags = Tag.query.filter_by(user_id = session['user_id']).all()\n outfits = Outfit.query.filter_by(user_id = session['user_id']).all()\n\n for outfit in outfits:\n downsize_image(outfit.articles, width=300, height=300)\n\n return render_template('single-event.html',\n event=event,\n tags=tags,\n outfits=outfits)\n\n\[email protected]('/delete-event', methods=['POST'])\ndef delete_event():\n \"\"\"Deletes an event.\"\"\"\n\n wear_event_id = request.form.get('event-to-delete')\n event = 
WearEvent.query.filter_by(wear_event_id = wear_event_id).one()\n\n event.delete()\n\n return redirect('/events')\n\n\n# @app.route('/etsy-api')\n# def test_etsy_api():\n # \"\"\"Test some Etsy API calls.\"\"\"\n\n # json_listings = etsy_api.getInterestingListings()\n\n # return render_template('api-test.html', json_listings=json_listings)\n\n\[email protected]('/ds-weather')\ndef test_weather_darksky():\n \"\"\"Test DarkSky's API & DarkSkyLib wrapper\"\"\"\n\n city = CITIES['SFO']\n # Dark Sky requires a date in isoformat\n weather = forecast.Forecast(dark_sky['secret'], city['lat'], city['lng'])\n\n hourly = weather.hourly\n\n print(datetime.time(datetime.now()))\n for hour in hourly:\n\n hour.datestr = datetime.utcfromtimestamp(hour['time']).strftime('%m-%d-%y %H:%M')\n\n # for forecast in forecasts:\n # forecast.temp = int(round(forecast.get_temperature('fahrenheit')['temp'],0))\n # forecast.datestr = datetime.utcfromtimestamp(\n # forecast.get_reference_time()).strftime('%H:%M')\n # today = forecasts[0:8]\n\n return render_template('ds-weather.html', hourly=hourly)\n\n\nif __name__ == \"__main__\":\n # We have to set debug=True here, since it has to be True at the\n # point that we invoke the DebugToolbarExtension\n app.debug = True\n # make sure templates, etc. are not cached in debug mode\n app.jinja_env.auto_reload = app.debug\n\n # # Use the DebugToolbar\n # DebugToolbarExtension(app)\n\n connect_to_db(app)\n app.run(port=5000, host='0.0.0.0')" }, { "alpha_fraction": 0.5471206903457642, "alphanum_fraction": 0.5547596216201782, "avg_line_length": 35.61680221557617, "blob_id": "13ae42b1726fd29afebcf84c674907ee3dad87d3", "content_id": "dffb7e60985bd6d63340f1c35b787e47b0c0d9a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 35738, "license_type": "no_license", "max_line_length": 123, "num_lines": 976, "path": "/model.py", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "\"\"\"Models & db functions for wardrobe manager project.\"\"\"\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import asc, update\nfrom datetime import date, timedelta, datetime as dt\nimport os\nfrom darksky import forecast\nimport textwrap\n\ndb = SQLAlchemy()\n\ndark_sky = ({\n 'secret':os.environ.get('DARK_SKY_API_SECRET'),\n })\n\n##############################################################################\n# Model definitions\n\nclass User(db.Model):\n \"\"\"User of wardrobe manager website.\n \n >>> dr_horrible = User(user_id = 0, email = '[email protected]', \\\n password = 'talktoPennyTODAY')\n >>> dr_horrible\n <user_id=0 [email protected]>\n \"\"\"\n\n __tablename__ = 'users'\n\n user_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n email = db.Column(db.String(64), nullable=False)\n password = db.Column(db.String(64), nullable=False)\n\n # Define relationships to Article, Outfit, and Category\n articles = db.relationship('Article', backref='user',\n cascade='all, delete, delete-orphan')\n outfits = db.relationship('Outfit', backref='user',\n cascade='all, delete, delete-orphan')\n categories = db.relationship('Category', backref='user',\n cascade='all, delete, delete-orphan')\n events = db.relationship('WearEvent', backref='user',\n cascade='all, delete, delete-orphan')\n\n def update(self, options):\n \"\"\"Update the user's information.\"\"\"\n\n self.email = options.get('email', self.email)\n self.password = options.get('password', self.password)\n db.session.commit()\n\n # TODO: 
learn about cascade delete as in relationships above, then remove\n # the remove-all-user-data functionality in delete().\n def delete(self):\n \"\"\"Remove the user.\"\"\"\n\n # First remove all of a user's data\n # Should be unnecessary with cascade-delete above, \n # remove once testing complete\n\n # Less good approach:\n # for outfit in self.outfits:\n # db.session.delete(outfit)\n # for article in self.articles:\n # db.session.delete(article)\n # for category in self.categories:\n # db.session.delete(category)\n\n # Maybe works?\n # ? self.outfits.delete()\n # ? Outfit.query.filter_by(user_id=self.user_id).delete()\n\n outfits.delete().where(outfits.user_id == self.user_id)\n articles.delete().where(articles.user_id == self.user_id)\n categories.delete().where(categories.user_id == self.user_id)\n\n # Then remove the account\n db.session.delete(self)\n db.session.commit()\n\n def calculate_value(self):\n \"\"\"Sum value of all articles a user owns.\"\"\"\n \n sum = 0\n for article in self.articles:\n sum += article.purchase_price\n return sum\n\n def get_stats(self):\n \"\"\"Count all of a user's outfits, articles, categories, and tags.\"\"\"\n\n # TODO: set up instance attribute stats somewhere else besides here :(\n self.stats = {}\n self.stats['counts'] = {}\n self.stats['most_worn'] = {}\n self.stats['best_value'] = {}\n self.stats['most_used'] = {}\n\n # TODO: refactor using something related to COUNT from SQLAlchemy\n self.__get_outfit_stats__()\n self.__get_article_stats__()\n self.__get_tag_stats__()\n self.__get_category_stats__()\n self.__get_event_stats__()\n\n return self.stats\n\n def __get_outfit_stats__(self):\n \"\"\"Stats for user's outfits.\"\"\"\n\n # TODO: refactor using something related to COUNT from SQLAlchemy\n outfits = self.get_outfits_query().order_by(Outfit.times_worn).all()\n self.stats['counts']['outfits'] = len(outfits)\n\n if outfits:\n self.stats['most_worn']['outfit'] = outfits[-1]\n\n best_value = outfits[-1].calculate_value()\n best_nonzero_value = -1\n\n for outfit in outfits:\n if outfit.times_worn > 0:\n value = outfit.calculate_value() / outfit.times_worn\n outfit.value = value\n if outfit.value < best_value:\n best_value = outfit.value\n self.stats['best_value']['outfit'] = outfit\n\n # Store the first nonzero outfit value, then store any subsequent \n # value better than it\n if outfit.value > 0 and (best_nonzero_value == -1 or \n outfit.value < best_nonzero_value):\n best_nonzero_value = outfit.value\n self.stats['best_value']['nonzero_outfit'] = outfit\n\n def __get_article_stats__(self):\n \"\"\"Stats for user's articles.\"\"\"\n\n # TODO: refactor using something related to COUNT from SQLAlchemy\n articles = self.get_articles_query().order_by(Article.times_worn).all()\n self.stats['counts']['articles'] = len(articles)\n\n if articles:\n self.stats['most_worn']['article'] = articles[-1]\n\n best_value = articles[-1].purchase_price\n best_nonzero_value = -1\n\n for article in articles:\n if article.times_worn > 0 and type(article.purchase_price) == float:\n value = article.purchase_price / article.times_worn\n article.value = value\n if article.value < best_value:\n best_value = article.value\n self.stats['best_value']['article'] = article\n\n # Store the first nonzero article value, then store any subsequent \n # value better than it\n if article.value > 0 and (best_nonzero_value == -1 or \n article.value < best_nonzero_value):\n best_nonzero_value = article.value\n self.stats['best_value']['nonzero_article'] = article\n\n def 
__get_category_stats__(self):\n \"\"\"Stats for user's categories.\"\"\"\n\n categories = self.get_categories_query().all()\n self.stats['counts']['categories'] = len(categories)\n\n for category in categories:\n # Reset for every loop of categories\n best_value = 0\n best_nonzero_value = -1\n articles = Article.query.filter(Article.category_id == category.category_id).order_by(Article.times_worn).all()\n if articles:\n self.stats['most_worn'][category.name] = articles[-1]\n # Set best_value to a purchase price of an article the list, possibly 0\n best_value = articles[-1].purchase_price\n self.stats['best_value'][category.name] = {'article': None,\n 'nonzero_article': None}\n \n for article in articles:\n if article.times_worn > 0 and type(article.purchase_price) in [float, int]:\n article.value = article.purchase_price / article.times_worn\n if article.value < best_value:\n best_value = article.value\n self.stats['best_value'][category.name]['article'] = article\n\n # Store the first nonzero article value, then store any subsequent \n # value better than it\n if article.value > 0 and (best_nonzero_value == -1 or \\\n article.value < best_nonzero_value):\n best_nonzero_value = article.value\n self.stats['best_value'][category.name]['nonzero_article'] = article\n\n def __get_event_stats__(self):\n \"\"\"Stats for user's events.\"\"\"\n\n events = self.get_events_query().all()\n self.stats['counts']['events'] = len(events)\n\n def __get_tag_stats__(self):\n \"\"\"Stats for user's tags.\"\"\"\n\n tags = self.get_tags_query().all()\n self.stats['counts']['tags'] = len(tags)\n self.stats['most_used']['article'] = {}\n self.stats['most_used']['outfit'] = {}\n self.stats['most_used']['event'] = {}\n\n tag_article_count = 0\n tag_outfit_count = 0\n tag_event_count = 0\n\n if tags:\n # TODO: refactor using MAX or a heap data structure\n # Or another table?\n for tag in tags:\n count_ta = TagArticle.query.filter(TagArticle.tag_id == tag.tag_id).count()\n count_to = TagOutfit.query.filter(TagOutfit.tag_id == tag.tag_id).count()\n count_te = TagEvent.query.filter(TagEvent.tag_id == tag.tag_id).count()\n if count_ta > tag_article_count:\n tag_article_count = count_ta\n self.stats['most_used']['article']['tag'] = tag\n self.stats['most_used']['article']['count'] = count_ta\n if count_to > tag_outfit_count:\n tag_outfit_count = count_to\n self.stats['most_used']['outfit']['tag'] = tag\n self.stats['most_used']['outfit']['count'] = count_to\n if count_te > tag_event_count:\n tag_event_count = count_te\n self.stats['most_used']['event']['tag'] = tag\n self.stats['most_used']['event']['count'] = count_te\n\n def get_categories_query(self):\n \"\"\"Start a query for all of a user's categories.\"\"\"\n \n categories_query = Category.query.filter_by(user_id = self.user_id)\n return categories_query\n\n def get_articles_query(self):\n \"\"\"Start a query for all of a user's articles.\"\"\"\n \n articles_query = Article.query.filter_by(user_id = self.user_id)\n return articles_query\n\n def get_outfits_query(self):\n \"\"\"Query for all of a user's outfits.\"\"\"\n \n outfits_query = Outfit.query.filter_by(user_id = self.user_id)\n return outfits_query\n\n def get_tags_query(self):\n \"\"\"Query for all of a user's tags.\"\"\"\n \n tags_query = Tag.query.filter_by(user_id = self.user_id)\n return tags_query\n\n def get_events_query(self):\n \"\"\"Query for all of a user's events.\"\"\"\n \n events_query = WearEvent.query.filter_by(user_id = self.user_id)\n return events_query\n\n def __repr__(self):\n return 
f'<user_id={self.user_id} email={self.email}>'\n\n\nclass Category(db.Model):\n \"\"\"User defined categories of clothing articles, inheriting from standard categories.\n \n >>> lab_coats = Category(category_id = 0, name = 'Lab Coats', description = \\\n 'Classic length lab coats', user_id = 0, base_category_id = 'fulls')\n >>> lab_coats\n <category_id=0 name=Lab Coats>\n >>> gloves = Category(category_id = 1, name = 'Gloves', description = \\\n 'Chemical resistant 18\" gloves', user_id = 0, base_category_id = 'others')\n >>> gloves\n <category_id=1 name=Gloves>\n \"\"\"\n\n __tablename__ = 'categories'\n\n category_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n name = db.Column(db.String(64), nullable=False)\n description = db.Column(db.String(256), nullable=True)\n\n user_id = db.Column(db.Integer,\n db.ForeignKey('users.user_id'),\n nullable=False)\n base_category_id = db.Column(db.String(10), \n db.ForeignKey('base_categories.base_category_id'), \n nullable=False)\n\n # Define relationship to BaseCategory and Article\n base_category = db.relationship('BaseCategory', backref='categories')\n articles = db.relationship('Article', backref='category')\n\n def update(self, options):\n \"\"\"Update the category's information.\"\"\"\n\n self.name = options.get('name', self.name)\n self.description = options.get('description', self.description)\n db.session.commit()\n\n # TODO: Warn if articles become orphaned as a result.\n # Best refactoring would be allowing a user to select some/all via filters\n # and reassign to a new category.\n def delete(self):\n \"\"\"Remove the category.\"\"\"\n\n db.session.delete(self)\n db.session.commit()\n\n def __repr__(self):\n return f'<category_id={self.category_id} name={self.name}>'\n\n\nclass Article(db.Model):\n \"\"\"Article of clothing.\n \n NOTE - The repr will throw an AttributeError if created in Repl because the \n relationship between article and category does not exist.\n\n >>> std_lab_coat = Article(article_id = 0, image = 'white_coat.png', \\\n description = '41\" white lab coat with 3 button closure', \\\n purchase_price = 48.99, times_worn = 3, user_id = 0, category_id = 0)\n >>> std_lab_coat\n <article_id=0 category_name=Lab Coats description=41\" white lab c>\n >>> std_lab_coat.description\n '41\" white lab coat with 3 button closure'\n\n >>> white_gloves = Article(article_id = 1, image = 'white_gloves.png', \\\n description = '18\" white work gloves', purchase_price = 8.99, \\\n times_worn = 3, user_id = 0, category_id = 1)\n >>> white_gloves\n <article_id=1 category_name=Gloves description=18\" white work >\n >>> white_gloves.times_worn\n 3\n \"\"\"\n\n __tablename__ = 'articles'\n\n article_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n description = db.Column(db.String(256), nullable=True)\n image = db.Column(db.String(), nullable=True)\n purchase_price = db.Column(db.Float, nullable=True)\n times_worn = db.Column(db.Integer, default=0, nullable=False)\n sell_price = db.Column(db.Float, nullable=True)\n\n user_id = db.Column(db.Integer,\n db.ForeignKey('users.user_id'),\n nullable=False)\n category_id = db.Column(db.Integer,\n db.ForeignKey('categories.category_id'),\n nullable=False)\n\n # Define relationship to Tag\n tags = db.relationship('Tag',\n backref='articles',\n secondary='tags_articles')\n\n def update(self, options):\n \"\"\"Update the article's information.\n\n >>> white_gloves.update({'purchase_price': 7.99})\n >>> white_gloves.purchase_price\n 7.99\n \"\"\"\n\n self.category_id = 
options.get('category_id', self.category_id)\n self.description = options.get('description', self.description)\n self.purchase_price = options.get('purchase_price', self.purchase_price)\n self.sell_price = options.get('sell_price', self.sell_price)\n\n db.session.commit()\n\n def add_tag(self, tag):\n \"\"\"Add the tag to the article.\"\"\"\n\n self.tags.append(tag)\n db.session.commit()\n\n def remove_tag(self, tag):\n \"\"\"Remove the tag from the article.\"\"\"\n\n self.tags.remove(tag)\n db.session.commit()\n\n def delete(self):\n \"\"\"Remove the article.\"\"\"\n\n db.session.delete(self)\n db.session.commit()\n\n def incr_times_worn(self):\n \"\"\"Increase times_worn attribute.\n\n >>> white_gloves.times_worn\n 3\n >>> white_gloves.incr_times_worn()\n >>> white_gloves.times_worn\n 4\n \"\"\"\n\n self.times_worn += 1\n db.session.commit()\n\n # Dedent doesn't work! White space is not eliminated :(\n # def __repr__(self):\n # return textwrap.dedent(\n # f'<article_id={self.article_id} \\\n # category.name={self.category.name} \\\n # description={self.description:.20}>'\n # )\n def __repr__(self):\n return f'<article_id={self.article_id} category.name={self.category.name} description={self.description:.20}>'\n\nclass Outfit(db.Model):\n \"\"\"Outfit composed of articles.\n \n >>> work_outfit = Outfit(outfit_id = 0, name = 'Work Outfit 1', \\\n description = 'White coat, white gloves, goggles, and work boots', \\\n times_worn = 3, user_id = 0)\n >>> work_outfit\n <outfit_id=0 name=Work Outfit 1 description=White coat, whi>\n\n \"\"\"\n \n __tablename__ = 'outfits'\n\n outfit_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n name = db.Column(db.String(64), nullable=True)\n description = db.Column(db.String(256), nullable=True)\n times_worn = db.Column(db.Integer, default='0', nullable=False)\n\n user_id = db.Column(db.Integer, db.ForeignKey('users.user_id'), nullable=False)\n\n # Define relationship to Article and Tag\n articles = db.relationship('Article', \n backref='outfits', \n secondary='articles_outfits')\n tags = db.relationship('Tag',\n backref='outfits',\n secondary='tags_outfits')\n events = db.relationship('WearEvent',\n backref='outfit')\n\n # Outfit update methods: update, add_article, remove_article\n def update(self, options):\n \"\"\"Update the outfit's information.\"\"\"\n\n self.name = options.get('name', self.name)\n self.description = options.get('description', self.description)\n self.times_worn = options.get('times_worn', self.times_worn)\n db.session.commit()\n\n def add_article(self, article):\n \"\"\"Add the article to the outfit.\"\"\"\n\n # TODO: check if an article of a cateogry doesn't alrady exist\n self.articles.append(article)\n db.session.commit()\n\n def remove_article(self, article):\n \"\"\"Remove the article from the outfit.\"\"\"\n\n self.articles.remove(article)\n db.session.commit()\n\n def add_tag(self, tag):\n \"\"\"Add the tag to the outfit.\"\"\"\n\n self.tags.append(tag)\n db.session.commit()\n\n def remove_tag(self, tag):\n \"\"\"Remove the tag from the outfit.\"\"\"\n\n self.tags.remove(tag)\n db.session.commit()\n\n # Outfit delete method\n def delete(self):\n \"\"\"Remove the outfit.\"\"\"\n\n db.session.delete(self)\n db.session.commit()\n\n # Assorted outfit methods\n def calculate_value(self):\n \"\"\"Sum value of all articles in the outfit.\"\"\"\n \n sum = 0\n for article in self.articles:\n if article.purchase_price:\n sum += article.purchase_price\n return sum\n\n def incr_times_worn(self):\n \"\"\"Increase 
times_worn attribute.\"\"\"\n\n self.times_worn += 1\n for article in self.articles:\n article.incr_times_worn()\n\n db.session.commit()\n\n def is_category_in_outfit(self, category):\n \"\"\"Returns whether an article of the given category exists in the outfit.\"\"\"\n for article in self.articles:\n if article.category_id == category.category_id:\n return True\n return False\n\n def count_category_articles(self, category):\n \"\"\"For the given category, count articles belonging to that category in outfit.\"\"\"\n count = 0\n for article in self.articles:\n if article.category_id == category.category_id:\n count += 1\n return count\n\n def last_worn(self):\n \"\"\"Return last date an outfit was worn.\"\"\"\n\n # What was the most recent wear date of top_outfit:\n wear_dates = WearEvent.query.filter_by(outfit_id = self.outfit_id).order_by(WearEvent.date).all()\n\n if wear_dates:\n return wear_dates[-1]\n else:\n return None\n\n def __repr__(self):\n return f'<outfit_id={self.outfit_id} name={self.name} description={self.description:.20}>'\n\n\nclass Tag(db.Model):\n \"\"\"Tag for articles and outfits.\"\"\"\n \n __tablename__ = 'tags'\n\n tag_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n name = db.Column(db.String(32), nullable=False)\n\n user_id = db.Column(db.Integer,\n db.ForeignKey('users.user_id'),\n nullable=False)\n\n def parse_str_to_tag(tag_string, user_id):\n \"\"\"Convert comma-sep string to Tag objects.\"\"\"\n\n new_tags = []\n\n if tag_string:\n tags = tag_string.split(',')\n for idx, tag_name in enumerate(tags):\n tag_name.lstrip()\n tag = Tag(user_id=user_id,\n name=tag_name)\n new_tags.append(tag)\n # db.session.add(tag)\n\n # db.session.commit()\n # Return new tags so we can create relationships with them\n return new_tags\n\n def update(self, options):\n \"\"\"Update the tag's information.\"\"\"\n\n self.name = options.get('name', self.name)\n db.session.commit()\n\n def delete(self):\n \"\"\"Remove the tag.\"\"\"\n\n db.session.delete(self)\n db.session.commit()\n\n def __repr__(self):\n return f'<tag_id={self.tag_id} name={self.name}>'\n\n\n# TODO: 1) Modify table to increase name column from 32 -> 128 char\n# TODO: 2) Replace WearEvent / wear_events -> Event / events\n# TODO: 3) Add a precipitation chance column\n# TODO: 4) Add icon column (icon most reliable \"summary\")\nclass WearEvent(db.Model):\n \"\"\"Instances of outfits being worn. 
Can be past or future; outfits can be added later.\"\"\"\n \n __tablename__ = 'wear_events'\n\n wear_event_id = db.Column(db.Integer, autoincrement=True, primary_key=True)\n name = db.Column(db.String(128), nullable=False)\n description = db.Column(db.String(256), nullable=True)\n date = db.Column(db.DateTime, nullable=False)\n temperature = db.Column(db.Float, nullable=True)\n weather_cond = db.Column(db.String(128), nullable=True)\n # precip_probability = db.Column(db.Float, nullable=True)\n # weather_icon = db.Column(db.String(128), nullable=True)\n\n user_id = db.Column(db.Integer,\n db.ForeignKey('users.user_id'),\n nullable=False)\n outfit_id = db.Column(db.Integer,\n db.ForeignKey('outfits.outfit_id'),\n nullable=True) # Outfit can be added after creation\n\n # Define relationship to Tag\n tags = db.relationship('Tag',\n backref='events',\n secondary='tags_events')\n\n def update(self, options):\n \"\"\"Update the event's information.\"\"\"\n\n self.name = options.get('name', self.name)\n self.description = options.get('description', self.description)\n self.date = options.get('date', self.date)\n self.outfit_id = options.get('outfit_id', self.outfit_id)\n db.session.commit()\n\n # TODO: After #3/4 above, set precip chance and icon instance attributes.\n def set_weather(self, lat=37.774929, lng=-122.419418):\n \"\"\"Assign temperature and weather conditions for date at latitude & longitude.\n \n >>> new_evt = WearEvent(wear_event_id=0, name='Party', \\\n description='Birthday party at Monarch', date=dt(2019, 11, 16, 20, 30), \\\n user_id=1)\n >>> new_evt\n <wear_event_id=0 name=Party user_id=1>\n >>> key = ####### \n >>> dark_sky['secret'] = key\n >>> sflat=37.774929\n >>> sflng=-122.419418\n >>> new_evt.set_weather(sflat, sflng)\n >>> new_evt.temperature\n 58.77\n >>> new_evt.weather_cond\n 'Clear'\n \"\"\"\n\n # Dark Sky requires a date in isoformat\n weather = forecast.Forecast(dark_sky['secret'], lat, lng, time=self.date.isoformat())\n self.temperature = weather.temperature\n self.weather_cond = weather.summary\n # self.weather_icon = weather.icon\n # self.precip_probability = weather.precipProbability\n db.session.commit()\n\n # TODO: If outfit has been worn this week, it has a lower priority.\n # Perhaps the outfit_dict should sort by quanity of tags matched and return \n # in this order so we can move down the list as options are eliminated.\n def match_tags(self):\n \"\"\"Compare event's tags to outfit tags. Dictionary returns all matches.\n\n Matches are stored as \n outfit: [<tag1>, <tag2>...] \n and as \n tag_count (e.g. 
3): [<outfit1>, <outfit2>...]\n with one special key 'top_pick'\n 'top_pick': <outfit8>\n \"\"\"\n\n outfit_dict = {}\n outfit_dict['top_pick'] = ''\n outfit_dict['all_picks'] = []\n\n if self.tags: \n most_tags = 0\n\n for tag in self.tags:\n for outfit in tag.outfits:\n outfit_dict[outfit] = outfit_dict.get(outfit, [])\n outfit_dict[outfit].append(tag)\n outfit_dict[len(outfit_dict[outfit])] = outfit_dict.get(len(outfit_dict[outfit]), [])\n outfit_dict[len(outfit_dict[outfit])].append(outfit)\n if len(outfit_dict[outfit]) > 1:\n outfit_dict[len(outfit_dict[outfit]) - 1].remove(outfit)\n if len(outfit_dict[outfit]) > most_tags:\n most_tags = len(outfit_dict[outfit])\n outfit_dict['top_pick'] = (outfit)\n\n for i in range(1, most_tags + 1):\n for outfit in outfit_dict[i]:\n outfit_dict['all_picks'].append(outfit)\n\n else:\n print('Event has no tags!')\n return None\n\n return outfit_dict\n\n # If self.date within 1 week of last worn for outfit_dict's top pick, \n # look at all other items with same number of tags\n # e.g. outfit_dict[3] = [outfit1, outfit2, outfit3] \n # eliminate outfit1, worn this week; look at outfit2\n # if not worn this week, replace top_pick.\n # top_pick = outfit_dict['top_pick'] and then get \n def remove_recent_outfits(self, outfit_dict):\n \"\"\"Iterate through all_picks removing any outfits worn in last week.\n\n >>> event = WearEvent.query.get(58)\n >>> od1 = event.match_tags()\n >>> od2 = event.remove_recent_outfits(od1)\n \"\"\"\n \n delta = timedelta(days=7)\n outfit_dict2 = {}\n outfit_dict2['all_picks'] = list(outfit_dict['all_picks'])\n outfit_dict2['top_pick'] = outfit_dict['top_pick']\n # if outfit_dict['top_pick'].last_worn() <= self.date - delta:\n\n for outfit in outfit_dict2['all_picks']:\n if outfit.last_worn() != None:\n print(outfit.outfit_id)\n print(outfit.last_worn().date)\n print(self.date - delta)\n if outfit.last_worn().date >= self.date - delta:\n outfit_dict2['all_picks'].remove(outfit)\n if outfit == outfit_dict2['top_pick']:\n outfit_dict2['top_pick'] = None\n else:\n continue\n\n if outfit_dict2['top_pick']:\n return outfit_dict2\n else:\n outfit_dict2['top_pick'] = outfit_dict2['all_picks'][-1]\n return outfit_dict2\n # if outfit_dict['top_pick'].last_worn() <= self.date - delta:\n # del outfit_dict['top_pick']\n # next_pick = outfit_dict[max_tags].get(outfit_dict[max_tags], None)\n # if next_pick:\n # outfit_dict['top_pick'] = next_pick\n # else:\n # max_tags -= 1\n # outfit_dict['top_pick'] = outfit_dict[max_tags].get(outfit_dict[max_tags], None)\n # self.remove_recent_outfits(outfit_dict)\n\n def recommend_coats(self):\n \"\"\"Logic for recommending extra layers.\"\"\"\n\n coat_count = 0\n\n precip_set = {\n \"rain\",\n \"raining\",\n \"drizzle\",\n \"snow\",\n \"snowing\",\n \"sleet\",\n \"sleeting\",\n \"hail\",\n \"hailing\",\n \"storm\",\n \"storms\",\n \"thunderstorm\",\n \"thunderstorms\",\n \"rainstorm\",\n \"rainstorms\",\n \"shower\",\n \"showers\"\n }\n weather_condition_set = set(self.weather_cond.split())\n\n if (precip_set & weather_condition_set):\n coat_count = 1\n\n if self.temperature >= 70:\n pass\n elif self.temperature >= 60:\n coat_count = 1\n elif self.temperature >= 45:\n coat_count += 1\n else:\n coat_count += 2\n\n return coat_count\n\n def filter_outfits_by_weather_and_recent(self):\n \"\"\"Combine match_tags(), remove_recent_outfits(), & recommend_coats() for a final recommendation.\n\n >>> event = WearEvent.query.get(58)\n >>> result = event.filter_outfits_by_weather_and_recent()\n >>> result\n 
{'all_picks': [\n <outfit_id=21 name= description=>,\n <outfit_id=23 name= description=Green sweater, white>,\n <outfit_id=25 name= description=Poncho + henley>,\n <outfit_id=33 name= description=>,\n <outfit_id=64 name= description=>\n ], \n 'top_pick': <outfit_id=64 name= description=>\n }\n \"\"\"\n\n outfit_dict = self.match_tags()\n if outfit_dict:\n outfit_dict2 = self.remove_recent_outfits(outfit_dict)\n coat_count = self.recommend_coats() if self.weather_cond else 0\n get_outerwear_categories = Category.query.filter(Category.base_category_id == 'outers').all()\n\n count_of_outerwear = 0\n \n outfit_dict3 = recursive_filter(coat_count, outfit_dict2, get_outerwear_categories)\n\n if outfit_dict3:\n return outfit_dict3\n\n else:\n # TODO: recommend a non-weather-appropriate outfit and print text suggesting a jacket\n # Better implementation would suggest a new outfit created from a top_pick \n # and one or more jackets as appropriate. \n outfit_dict2['top_pick'] = None\n return outfit_dict2\n else:\n return None\n\n def add_tag(self, tag):\n \"\"\"Add the tag to the event.\"\"\"\n\n self.tags.append(tag)\n db.session.commit()\n\n def remove_tag(self, tag):\n \"\"\"Remove the tag from the event.\"\"\"\n\n self.tags.remove(tag)\n db.session.commit()\n\n def delete(self):\n \"\"\"Remove the event.\"\"\"\n\n db.session.delete(self)\n db.session.commit()\n\n def __repr__(self):\n return f'<wear_event_id={self.wear_event_id} name={self.name} user_id={self.user_id}>'\n\n\ndef recursive_filter(coat_count, outfit_dict, category_list):\n \"\"\"Recursively walks through outfit_dict to look for a match with enough coats.\"\"\"\n\n count_of_outerwear = 0\n for category in category_list:\n count_of_outerwear += outfit_dict['top_pick'].count_category_articles(category)\n print(f'category={category.name}, count={count_of_outerwear}')\n\n if count_of_outerwear >= coat_count:\n # First base case is when there are enough coats present\n return outfit_dict\n elif len(outfit_dict['all_picks']) < 1:\n # Second base case is when we run out of outfits\n return None\n else:\n print(f\"old top pick={outfit_dict['top_pick']}, new top pick={outfit_dict['all_picks'][-1]}\")\n outfit_dict['top_pick'] = outfit_dict['all_picks'][-1]\n outfit_dict['all_picks'].pop()\n recursive_filter(coat_count, outfit_dict, category_list)\n\n\nclass BaseCategory(db.Model):\n \"\"\"Standard categories of clothing articles.\"\"\"\n\n __tablename__ = 'base_categories'\n\n base_category_id = db.Column(db.Unicode(10), primary_key=True)\n name = db.Column(db.String(64), nullable=False)\n description = db.Column(db.String(256), nullable=True)\n\n def __repr__(self):\n return f'<base_category_id={self.base_category_id} name={self.name}>'\n\n\nclass ArticleOutfit(db.Model):\n \"\"\"Association table for articles and outfits.\"\"\"\n\n __tablename__ = 'articles_outfits'\n\n article_outfit_id = db.Column(db.Integer,\n autoincrement=True,\n primary_key=True)\n article_id = db.Column(db.Integer,\n db.ForeignKey('articles.article_id'),\n nullable=False)\n outfit_id = db.Column(db.Integer,\n db.ForeignKey('outfits.outfit_id'),\n nullable=False)\n\n def __repr__(self):\n return f'<article_outfit_id={self.article_outfit_id} \\\n article_id={self.article_id} \\\n outfit_id={self.outfit_id}>'\n\n\nclass TagArticle(db.Model):\n \"\"\"Association table for tags and articles.\"\"\"\n \n __tablename__ = 'tags_articles'\n\n tag_article_id = db.Column(db.Integer,\n autoincrement=True,\n primary_key=True)\n article_id = db.Column(db.Integer,\n 
db.ForeignKey('articles.article_id'),\n nullable=False)\n tag_id = db.Column(db.Integer,\n db.ForeignKey('tags.tag_id'),\n nullable=False)\n\n def __repr__(self):\n return f'<tag_article_id={self.tag_article_id} tag_id={self.tag_id} article_id={self.article_id}>'\n\n\nclass TagOutfit(db.Model):\n \"\"\"Association table for tags and outfits.\"\"\"\n \n __tablename__ = 'tags_outfits'\n\n tag_outfit_id = db.Column(db.Integer,\n autoincrement=True,\n primary_key=True)\n outfit_id = db.Column(db.Integer,\n db.ForeignKey('outfits.outfit_id'),\n nullable=False)\n tag_id = db.Column(db.Integer,\n db.ForeignKey('tags.tag_id'),\n nullable=False)\n\n def __repr__(self):\n return f'<tag_outfit_id={self.tag_outfit_id} tag_id={self.tag_id} outfit_id={self.outfit_id}>'\n\n\nclass TagEvent(db.Model):\n \"\"\"Association table for tags and events.\"\"\"\n \n __tablename__ = 'tags_events'\n\n tag_event_id = db.Column(db.Integer,\n autoincrement=True,\n primary_key=True)\n wear_event_id = db.Column(db.Integer,\n db.ForeignKey('wear_events.wear_event_id'),\n nullable=False)\n tag_id = db.Column(db.Integer,\n db.ForeignKey('tags.tag_id'),\n nullable=False)\n\n def __repr__(self):\n return f'<tag_outfit_id={self.tag_outfit_id} tag_id={self.tag_id} wear_event_id={self.wear_event_id}>'\n\n\n##############################################################################\n# Helper functions\n\ndef connect_to_db(app):\n \"\"\"Connect the database to our Flask app.\"\"\"\n\n # Configure to use our PstgreSQL database\n app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///clothes'\n # app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///testclothes'\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n # app.config['SQLALCHEMY_ECHO'] = True\n db.app = app\n db.init_app(app)\n\n\nif __name__ == '__main__':\n # As a convenience, if we run this module interactively, it will leave\n # you in a state of being able to work with the database directly.\n\n from server import app \n connect_to_db(app)\n print('Connected to DB.')\n # db.create_all()\n" }, { "alpha_fraction": 0.4942857027053833, "alphanum_fraction": 0.7057142853736877, "avg_line_length": 15.666666984558105, "blob_id": "467c9ffa123569b17374f1992bcfe6722395c569", "content_id": "e98feddd011beb69faa520c0b6975aa469f4d60b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 350, "license_type": "no_license", "max_line_length": 26, "num_lines": 21, "path": "/requirements.txt", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "blinker==1.4\ncertifi==2019.11.28\nchardet==3.0.4\nClick==7.0\ncloudinary==1.20.0\ndarksky==0.3\nFlask==1.1.1\nFlask-DebugToolbar==0.11.0\nFlask-SQLAlchemy==2.4.1\nidna==2.9\nitsdangerous==1.1.0\nJinja2==2.11.1\nmapq==0.3\nMarkupSafe==1.1.1\npsycopg2-binary==2.8.4\nrequests==2.23.0\nsimplejson==3.17.0\nsix==1.14.0\nSQLAlchemy==1.3.13\nurllib3==1.25.8\nWerkzeug==1.0.0\n" }, { "alpha_fraction": 0.2824207544326782, "alphanum_fraction": 0.39289143681526184, "avg_line_length": 18.660377502441406, "blob_id": "5c5989c17d63461dea9936b2fab7faf0f893cd28", "content_id": "40dfcc3ee5dc591e5754fabeac1aa8e55a53b1c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 33, "num_lines": 53, "path": "/global_var.py", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "CITIES = {\n 'ATL': {\n 'name': 'Atlanta',\n 'location_id': 'ATL',\n 
'lat': 33.753746,\n 'lng': -84.386330,\n },\n 'MSP': {\n 'name': 'Minneapolis',\n 'location_id': 'MSP',\n 'lat': 44.970697,\n 'lng': -93.2614785,\n },\n 'OAK': {\n 'name': 'Oakland',\n 'location_id': 'OAK',\n 'lat': 37.804363, \n 'lng': -122.271111,\n },\n 'RDU': {\n 'name': 'Raleigh',\n 'location_id': 'RDU',\n 'lat': 35.787743, \n 'lng': -78.644257, \n },\n 'SFO':{\n 'name': 'San Francisco',\n 'location_id': 'SFO',\n 'lat': 37.774929, \n 'lng': -122.419418,\n },\n 'StCrz': {\n 'name': 'Santa Cruz',\n 'location_id': 'StCrz',\n 'lat': 37.276741, \n 'lng': -121.922869,\n },\n}\n\nMONTHS = {\n 1: 'January',\n 2: 'February',\n 3: 'March',\n 4: 'April',\n 5: 'May',\n 6: 'June',\n 7: 'July',\n 8: 'August',\n 9: 'September',\n 10: 'October',\n 11: 'November',\n 12: 'December'\n}" }, { "alpha_fraction": 0.5962646007537842, "alphanum_fraction": 0.5979766249656677, "avg_line_length": 30.970149993896484, "blob_id": "740e8058f0fec1a3156bd874d0f25acad4fbde40", "content_id": "c5fd9e370d029e2f18d5adb242ad8854910d5d13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6425, "license_type": "no_license", "max_line_length": 121, "num_lines": 201, "path": "/seed.py", "repo_name": "mukoko99/unfold-wardrobe-manager", "src_encoding": "UTF-8", "text": "\"\"\"Utility file to seed database\"\"\"\n\nfrom sqlalchemy import func\n\n# Import helper function, SQLAlchemy database, and model definitions\nfrom model import (connect_to_db, db, User, BaseCategory, Category, Article,\n Outfit, Tag, ArticleOutfit, TagArticle, TagOutfit)\n\nfrom server import app\nfrom datetime import datetime\n\n\ndef load_users():\n \"\"\"Load users from seed-user.txt into database.\"\"\"\n\n print(\"Users\")\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n User.query.delete()\n\n # Read seed category file and insert data\n for row in open(\"seed/seed-user.txt\"):\n row = row.rstrip()\n user_id, email, password = row.split(\"|\")\n\n user = User(user_id=user_id,\n email=email.lower(), # Cast to lowercase\n password=password)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(user)\n\n # Once we're done, we should commit our work\n db.session.commit()\n\n\ndef load_base_categories():\n \"\"\"Load base categories from seed-category.txt into database.\"\"\"\n\n print(\"Base Categories\")\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n BaseCategory.query.delete()\n\n # Read seed category file and insert data\n for row in open(\"seed/seed-category.txt\"):\n row = row.rstrip()\n base_category_id, name, description = row.split(\"|\")\n\n base_category = BaseCategory(base_category_id=base_category_id,\n name=name,\n description=description)\n db.session.add(base_category)\n db.session.commit()\n\n\ndef load_user_categories():\n \"\"\"Load user categories from seed-user-category.txt into database.\"\"\"\n\n print(\"User Categories\")\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Category.query.delete()\n\n # Read seed category file and insert data\n for row in open(\"seed/seed-user-category-2.txt\"):\n row = row.rstrip()\n # Works for original seed data\n # user_id, base_category_id, name, description = row.split(\"|\")\n\n # These are metadata lines in the file\n if not row.startswith('--'):\n category_id, name, description, user_id, 
base_category_id = row.split(\"|\")\n\n category = Category(category_id=int(category_id),\n name=name,\n description=description,\n user_id=int(user_id),\n base_category_id=base_category_id)\n db.session.add(category)\n db.session.commit()\n\n\ndef load_articles():\n \"\"\"Load articles from seed-article.txt into database.\"\"\"\n\n print(\"Articles\")\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Article.query.delete()\n\n # Read seed category file and insert data\n for row in open(\"seed/seed-article-2.txt\"):\n row = row.rstrip()\n # Works for original seed data \n # user_id, category_id, description = row.split(\"|\")\n\n # These are metadata lines in the file\n if not row.startswith('--'):\n article_id, description, image, purchase_price, times_worn, sell_price, user_id, category_id = row.split(\"|\")\n \n # Prevent passing an empty string into field expecting float\n if not purchase_price:\n purchase_price = None\n \n article = Article(article_id=int(article_id),\n description=description,\n image=image,\n purchase_price=purchase_price,\n times_worn=times_worn,\n user_id=int(user_id),\n category_id=int(category_id),\n )\n db.session.add(article)\n db.session.commit()\n\n\ndef load_tags():\n \"\"\"Load tags from seed-tag.txt into database.\"\"\"\n\n print(\"Tags\")\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Tag.query.delete()\n\n # Read seed category file and insert data\n for row in open(\"seed/seed-tag.txt\"):\n row = row.rstrip()\n user_id, name = row.split(\"|\")\n\n tag = Tag(user_id=int(user_id),\n name=name)\n\n # We need to add to the session or it won't ever be stored\n db.session.add(tag)\n\n # Once we're done, we should commit our work\n db.session.commit()\n\n\ndef set_val_user_id():\n \"\"\"Set value for the next user_id after seeding database\"\"\"\n\n # Get the Max user_id in the database\n result = db.session.query(func.max(User.user_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next user_id to be max_id + 1\n query = \"SELECT setval('users_user_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id + 1})\n db.session.commit()\n\n\ndef set_val_category_id():\n \"\"\"Set value for the next category_id after seeding database\"\"\"\n\n # Get the Max category_id in the database\n result = db.session.query(func.max(Category.category_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next category_id to be max_id + 1\n query = \"SELECT setval('categories_category_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id + 1})\n db.session.commit()\n\n\ndef set_val_article_id():\n \"\"\"Set value for the next article_id after seeding database\"\"\"\n\n # Get the Max article_id in the database\n result = db.session.query(func.max(Article.article_id)).one()\n max_id = int(result[0])\n\n # Set the value for the next category_id to be max_id + 1\n query = \"SELECT setval('articles_article_id_seq', :new_id)\"\n db.session.execute(query, {'new_id': max_id + 1})\n db.session.commit()\n\n\nif __name__ == \"__main__\":\n connect_to_db(app)\n\n # In case tables haven't been created, create them\n db.create_all()\n\n # Import data\n load_users()\n load_base_categories()\n load_user_categories()\n load_articles()\n load_tags()\n\n # Update IDs to reflect imported data\n set_val_user_id()\n set_val_category_id()\n set_val_article_id()" } ]
6
pythonthings/RaceDB
https://github.com/pythonthings/RaceDB
a5a7f82f24045d4bd5f64fe600d3fcf189d9a9be
1468a5454ba0805ad8494bb0f6b446a5abbb4b2b
bbb2e069eb15518ac7362685d1439254eafd91a1
refs/heads/master
2020-07-14T10:46:21.070575
2019-08-03T21:16:48
2019-08-03T21:16:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7220339179039001, "alphanum_fraction": 0.7220339179039001, "avg_line_length": 21.69230842590332, "blob_id": "cd09ee1bbb2feddd7ecb2a148ab12f53745e5b08", "content_id": "c89fd7555bfff3cad8e5fc5d431eb6d89e21b3fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 60, "num_lines": 13, "path": "/RaceDB/urls.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, re_path\nfrom django.shortcuts import redirect\n\nfrom django.contrib import admin\n\nfrom core import urls as core_urls\n\nadmin.autodiscover()\n\nurlpatterns = [\n\tre_path(r'^[Rr][Aa][Cc][Ee][Dd][Bb]/', include(core_urls)),\n\tre_path(r'^admin/', admin.site.urls),\n]\n" }, { "alpha_fraction": 0.47780925035476685, "alphanum_fraction": 0.5165250301361084, "avg_line_length": 19.365385055541992, "blob_id": "1de1dd603b3eb1dedab317af0149086aad31c446", "content_id": "2a1137c7b1d6229d9c1829d07884e9e4373ef325", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1059, "license_type": "no_license", "max_line_length": 85, "num_lines": 52, "path": "/core/get_id.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "UTF-8", "text": "import os\nimport random\n\ndef get_id( bits = 122 ):\n\ttry:\n\t\tv = 0\n\t\tfor b in os.urandom( (bits >> 3) + int((bits & 7) != 0) ):\n\t\t\tv = (v << 8) | ord(b)\n\t\tv &= (1 << bits)-1\n\texcept NotImplementedError:\n\t\tv = random.getrandbits( bits )\n\t\n\tif not (v & 0xf << (bits-4)):\n\t\tv ^= (1 << bits)-1\n\treturn '{:X}'.format( v )\n\t\n\t'''\n\tu = uuid.uuid4()\n\ti = u.int\n\tv = '{:0{}X}'.format( i & ((1<<bits)-1), bits//4 )\n\treturn 'F' + v[1:] if v[0] == '0' else v\n\t'''\n\t\n\t\n\t'''\n\tu = uuid.uuid1()\n\t\n\tnode, node_bits\t\t= u.node, \t\t48\n\tseq, seq_bits\t\t= u.clock_seq,\t14\n\ttime, time_bits\t\t= u.time,\t\t60\n\t\n\tif bits < 122:\n\t\t# Throw away bits in the sequence and node.\n\t\tbits = max( bits, 60 )\n\t\td = 122 - bits\n\t\t\n\t\td_seq_bits = (d * seq_bits) // node_bits\n\t\td_node_bits = d - d_seq_bits\n\t\t\n\t\tnode >>= d_node_bits\n\t\tnode_bits -= d_node_bits\n\t\t\n\t\tseq >>= d_seq_bits\n\t\tseq_bits -= d_seq_bits\n\t\n\treturn '{:X}'.format( (node << (seq_bits + time_bits)) + (seq << time_bits) + time )\n\t'''\n\t\nif __name__ == '__main__':\n\tus = [get_id(96) for i in range(20)]\n\tfor u in us:\n\t\tprint ( u, len(u) )\n" }, { "alpha_fraction": 0.5762032270431519, "alphanum_fraction": 0.728721022605896, "avg_line_length": 37.68534469604492, "blob_id": "6f82275058605e5127c1f18cfa05149a13be7b08", "content_id": "76146304f796aa880835c4c7a0579523f1fcb8f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8985, "license_type": "no_license", "max_line_length": 193, "num_lines": 232, "path": "/core/init_riders.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "ISO-8859-2", "text": "from django.db import transaction\nimport datetime\n\nfrom .models import *\nfrom . 
import utils\nfrom .large_delete_all import large_delete_all\n\ntdf = b'''\n1\tAlberto Contador\t Spain\tSaxo Bank-SunGard\t28\t5\n2\tJesús Hernández\t Spain\tSaxo Bank-SunGard\t29\t92\n3\tDaniel Navarro\t Spain\tSaxo Bank-SunGard\t27\t62\n4\tBenjamín Noval\t Spain\tSaxo Bank-SunGard\t32\t116\n5\tRichie Porte\t Australia\tSaxo Bank-SunGard\t26\t72\n6\tChris Anker Sřrensen\t Denmark\tSaxo Bank-SunGard\t26\t37\n7\tNicki Sřrensen\t Denmark\tSaxo Bank-SunGard\t36\t95\n8\tMatteo Tosatto\t Italy\tSaxo Bank-SunGard\t37\t123\n9\tBrian Vandborg\t Denmark\tSaxo Bank-SunGard\t29\t125\n11\tAndy Schleck\t Luxembourg\tLeopard Trek\t26\t2\n12\tFabian Cancellara\t Switzerland\tLeopard Trek\t30\t119\n13\tJakob Fuglsang\t Denmark\tLeopard Trek\t26\t50\n14\tLinus Gerdemann\t Germany\tLeopard Trek\t28\t60\n15\tMaxime Monfort\t Belgium\tLeopard Trek\t28\t29\n16\tStuart O'Grady\t Australia\tLeopard Trek\t37\t78\n17\tJoost Posthuma\t Netherlands\tLeopard Trek\t30\t108\n18\tFränk Schleck\t Luxembourg\tLeopard Trek\t31\t3\n19\tJens Voigt\t Germany\tLeopard Trek\t39\t67\n21\t Samuel Sánchez\t Spain\tEuskaltel-Euskadi\t33\t6\n22\tGorka Izagirre\t Spain\tEuskaltel-Euskadi\t23*\t66\n23\tEgoi Martínez\t Spain\tEuskaltel-Euskadi\t33\t34\n24\tAlan Pérez\t Spain\tEuskaltel-Euskadi\t28\t94\n25\tRubén Pérez\t Spain\tEuskaltel-Euskadi\t29\t75\n26\tAmets Txurruka\t Spain\tEuskaltel-Euskadi\t28\tDNF-9\n27\tPablo Urtasun\t Spain\tEuskaltel-Euskadi\t31\t149\n28\tIván Velasco\t Spain\tEuskaltel-Euskadi\t31\tDNS-6\n29\tGorka Verdugo\t Spain\tEuskaltel-Euskadi\t32\t25\n31\tJurgen Van Den Broeck\t Belgium\tOmega Pharma-Lotto\t28\tDNF-9\n32\tPhilippe Gilbert\t Belgium\tOmega Pharma-Lotto\t28\t38\n33\tAndré Greipel\t Germany\tOmega Pharma-Lotto\t28\t156\n34\tSebastian Lang\t Germany\tOmega Pharma-Lotto\t31\t113\n35\tJurgen Roelandts\t Belgium\tOmega Pharma-Lotto\t26\t85\n36\tMarcel Sieberg\t Germany\tOmega Pharma-Lotto\t29\t141\n37\tJurgen Van de Walle\t Belgium\tOmega Pharma-Lotto\t34\tDNF-4\n38\tJelle Vanendert\t Belgium\tOmega Pharma-Lotto\t26\t20\n39\tFrederik Willems\t Belgium\tOmega Pharma-Lotto\t31\tDNF-9\n41\tRobert Gesink\t Netherlands\tRabobank\t25*\t33\n42\tCarlos Barredo\t Spain\tRabobank\t30\t35\n43\tLars Boom\t Netherlands\tRabobank\t25\tDNF-13\n44\tJuan Manuel Gárate\t Spain\tRabobank\t35\tDNS-9\n45\tBauke Mollema\t Netherlands\tRabobank\t24*\t70\n46\tGrischa Niermann\t Germany\tRabobank\t35\t71\n47\tLuis León Sánchez\t Spain\tRabobank\t27\t57\n48\tLaurens ten Dam\t Netherlands\tRabobank\t30\t58\n49\tMaarten Tjallingii\t Netherlands\tRabobank\t33\t99\n51\tThor Hushovd\t Norway\t Garmin-Cervélo\t33\t68\n52\tTom Danielson\t United States\t Garmin-Cervélo\t33\t9\n53\tJulian Dean\t New Zealand\t Garmin-Cervélo\t36\t145\n54\tTyler Farrar\t United States\t Garmin-Cervélo\t27\t159\n55\tRyder Hesjedal\t Canada\t Garmin-Cervélo\t30\t18\n56\tDavid Millar\t United Kingdom\t Garmin-Cervélo\t34\t76\n57\tRamunas Navardauskas\t Lithuania\t Garmin-Cervélo\t23*\t157\n58\tChristian Vande Velde\t United States\t Garmin-Cervélo\t35\t17\n59\tDavid Zabriskie\t United States\t Garmin-Cervélo\t32\tDNF-9\n61\tAlexandre Vinokourov\t Kazakhstan\tAstana\t37\tDNF-9\n62\tRémy Di Gregorio\t France\tAstana\t25\t39\n63\tDmitry Fofonov\t Kazakhstan\tAstana\t34\t106\n64\tAndriy Hryvko\t Ukraine\tAstana\t27\t144\n65\tMaxim Iglinsky\t Kazakhstan\tAstana\t30\t105\n66\tRoman Kreuziger\t Czech Republic\tAstana\t25*\t112\n67\tPaolo Tiralongo\t Italy\tAstana\t33\tDNF-17\n68\tTomas Vaitkus\t Lithuania\tAstana\t29\t140\n69\tAndrey Zeits\t 
Kazakhstan\tAstana\t24*\t45\n71\tJanez Brajkovic\t Slovenia\tTeam RadioShack\t27\tDNF-5\n72\tChris Horner\t United States\tTeam RadioShack\t39\tDNS-8\n73\tMarkel Irizar\t Spain\tTeam RadioShack\t31\t84\n74\tAndreas Klöden\t Germany\tTeam RadioShack\t36\tDNF-13\n75\tLevi Leipheimer\t United States\tTeam RadioShack\t37\t32\n76\tDimitry Muravyev\t Kazakhstan\tTeam RadioShack\t31\t129\n77\tSérgio Paulinho\t Portugal\tTeam RadioShack\t31\t81\n78\tYaroslav Popovych\t Ukraine\tTeam RadioShack\t31\tDNS-10\n79\tHaimar Zubeldia\t Spain\tTeam RadioShack\t34\t16\n81\tDavid Arroyo\t Spain\tMovistar Team\t31\t36\n82\tAndrey Amador\t Costa Rica\tMovistar Team\t24*\t166\n83\tRui Costa\t Portugal\tMovistar Team\t24 *\t90\n84\tImanol Erviti\t Spain\tMovistar Team\t27\t88\n85\tIván Gutiérrez\t Spain\tMovistar Team\t32\t102\n86\tBeńat Intxausti\t Spain\tMovistar Team\t25*\tDNF-8\n87\tVasil Kiryienka\t Belarus\tMovistar Team\t30\tHD-6\n88\tJosé Joaquin Rojas\t Spain\tMovistar Team\t26\t80\n89\tFrancisco Ventoso\t Spain\tMovistar Team\t29\t139\n91\tIvan Basso\t Italy\tLiquigas-Cannondale\t33\t8\n92\tMaciej Bodnar\t Poland\tLiquigas-Cannondale\t26\t143\n93\tKristijan Koren\t Slovenia\tLiquigas-Cannondale\t24*\t87\n94\tPaolo Longo Borghini\t Italy\tLiquigas-Cannondale\t30\t126\n95\tDaniel Oss\t Italy\tLiquigas-Cannondale\t24*\t100\n96\tMaciej Paterski\t Poland\tLiquigas-Cannondale\t24*\t69\n97\tFabio Sabatini\t Italy\tLiquigas-Cannondale\t26\t167\n98\tSylwester Szmyd\t Poland\tLiquigas-Cannondale\t33\t42\n99\tAlessandro Vanotti\t Italy\tLiquigas-Cannondale\t30\t133\n101\tNicolas Roche\t Ireland\tAg2r-La Mondiale\t26\t26\n102\tMaxime Bouet\t France\tAg2r-La Mondiale\t24*\t55\n103\tHubert Dupont\t France\tAg2r-La Mondiale\t30\t22\n104\tJohn Gadret\t France\tAg2r-La Mondiale\t32\tDNF-11\n105\tSébastien Hinault\t France\tAg2r-La Mondiale\t37\t111\n106\tBlel Kadri\t France\tAg2r-La Mondiale\t24*\t117\n107\tSébastien Minard\t France\tAg2r-La Mondiale\t29\t110\n108\tJean-Christophe Péraud\t France\tAg2r-La Mondiale\t34\t10\n109\tChristophe Riblon\t France\tAg2r-La Mondiale\t30\t51\n111\tBradley Wiggins\t United Kingdom\tTeam Sky\t31\tDNF-7\n112\tJuan Antonio Flecha\t Spain\tTeam Sky\t33\t98\n113\tSimon Gerrans\t Australia\tTeam Sky\t31\t96\n114\tEdvald Boasson Hagen\t Norway\tTeam Sky\t24*\t53\n115\tChristian Knees\t Germany\tTeam Sky\t30\t64\n116\tBen Swift\t United Kingdom\tTeam Sky\t23*\t137\n117\tGeraint Thomas\t United Kingdom\tTeam Sky\t25*\t31\n118\tRigoberto Uran\t Colombia\tTeam Sky\t24*\t24\n119\tXabier Zandio\t Spain\tTeam Sky\t34\t48\n121\tSylvain Chavanel\t France\tQuick Step\t32\t61\n122\tTom Boonen\t Belgium\tQuick Step\t30\tDNF-7\n123\tGerald Ciolek\t Germany\tQuick Step\t24*\t150\n124\tKevin De Weert\t Belgium\tQuick Step\t29\t13\n125\tDries Devenyns\t Belgium\tQuick Step\t27\t46\n126\tAddy Engels\t Netherlands\tQuick Step\t34\t146\n127\tJérôme Pineau\t France\tQuick Step\t31\t54\n128\tGert Steegmans\t Belgium\tQuick Step\t30\tDNS-13\n129\tNiki Terpstra\t Netherlands\tQuick Step\t27\t134\n131\tSandy Casar\t France\tFDJ\t32\t27\n132\tWilliam Bonnet\t France\tFDJ\t29\tHD-14\n133\tMickaël Delage\t France\tFDJ\t25\t132\n134\tArnold Jeannesson\t France\tFDJ\t25*\t15\n135\tGianni Meersman\t Belgium\tFDJ\t25\t77\n136\tRémi Pauriol\t France\tFDJ\t29\tDNF-7\n137\tAnthony Roux\t France\tFDJ\t24*\t101\n138\t Jérémy Roy\t France\tFDJ\t28\t86\n139\tArthur Vichot\t France\tFDJ\t22*\t104\n141\t Cadel Evans\t Australia\tBMC Racing Team\t34\t1\n142\tBrent Bookwalter\t United States\tBMC Racing Team\t27\t114\n143\tMarcus 
Burghardt\t Germany\tBMC Racing Team\t28\t164\n144\tGeorge Hincapie\t United States\tBMC Racing Team\t38\t56\n145\tAmaël Moinard\t France\tBMC Racing Team\t29\t65\n146\tSteve Morabito\t Switzerland\tBMC Racing Team\t28\t49\n147\tManuel Quinziato\t Italy\tBMC Racing Team\t31\t115\n148\tIvan Santaromita\t Italy\tBMC Racing Team\t27\t83\n149\tMichael Schär\t Switzerland\tBMC Racing Team\t24*\t103\n151\tRein Taaramäe\t Estonia\tCofidis\t24*\t12\n152\tMickaël Buffaz\t France\tCofidis\t32\t131\n153\tSamuel Dumoulin\t France\tCofidis\t30\t162\n154\tLeonardo Duque\t Colombia\tCofidis\t31\t121\n155\tJulien El Fares\t France\tCofidis\t26\t40\n156\tTony Gallopin\t France\tCofidis\t23*\t79\n157\tDavid Moncoutié\t France\tCofidis\t36\t41\n158\tTristan Valentin\t France\tCofidis\t29\t118\n159\tRomain Zingle\t Belgium\tCofidis\t24*\t152\n161\tDamiano Cunego\t Italy\tLampre-ISD\t29\t7\n162\tLeonardo Bertagnolli\t Italy\tLampre-ISD\t33\tDNF-18\n163\tGrega Bole\t Slovenia\tLampre-ISD\t25\t127\n164\tMatteo Bono\t Italy\tLampre-ISD\t27\t93\n165\tDanilo Hondo\t Germany\tLampre-ISD\t37\t109\n166\tDenys Kostyuk\t Ukraine\tLampre-ISD\t29\t153\n167\tDavid Loosli\t Switzerland\tLampre-ISD\t31\t59\n168\tAdriano Malori\t Italy\tLampre-ISD\t23*\t91\n169\tAlessandro Petacchi\t Italy\tLampre-ISD\t37\t107\n171\t Mark Cavendish\t United Kingdom\tHTC-Highroad\t26\t130\n172\tLars Bak\t Denmark\tHTC-Highroad\t31\t154\n173\tBernhard Eisel\t Austria\tHTC-Highroad\t30\t161\n174\tMatthew Goss\t Australia\tHTC-Highroad\t24*\t142\n175\tTony Martin\t Germany\tHTC-Highroad\t26\t44\n176\tDanny Pate\t United States\tHTC-Highroad\t32\t165\n177\tMark Renshaw\t Australia\tHTC-Highroad\t28\t163\n178\tTejay van Garderen\t United States\tHTC-Highroad\t22*\t82\n179\tPeter Velits\t Slovakia\tHTC-Highroad\t26\t19\n181\tThomas Voeckler\t France\tTeam Europcar\t32\t4\n182\tAnthony Charteau\t France\tTeam Europcar\t32\t52\n183\tCyril Gautier\t France\tTeam Europcar\t23*\t43\n184\tYohann Gčne\t France\tTeam Europcar\t30\t158\n185\tVincent Jérôme\t France\tTeam Europcar\t26\t155\n186\tChristophe Kern\t France\tTeam Europcar\t30\tDNF-5\n187\tPerrig Quemeneur\t France\tTeam Europcar\t27\t151\n188\t Pierre Rolland\t France\tTeam Europcar\t24*\t11\n189\tSébastien Turgot\t France\tTeam Europcar\t27\t120\n191\tVladimir Karpets\t Russia\tTeam Katusha\t30\t28\n192\tPavel Brutt\t Russia\tTeam Katusha\t29\tDNF-9\n193\tDenis Galimzyanov\t Russia\tTeam Katusha\t24*\tHD-12\n194\tVladimir Gusev\t Russia\tTeam Katusha\t28\t23\n195\tMikhail Ignatiev\t Russia\tTeam Katusha\t26\t147\n196\tVladimir Isaichev\t Russia\tTeam Katusha\t25*\tDNF-13\n197\tAlexandr Kolobnev\t Russia\tTeam Katusha\t30\tDNS-10\n198\tEgor Silin\t Russia\tTeam Katusha\t23*\t73\n199\tYuri Trofimov\t Russia\tTeam Katusha\t27\t30\n201\tRomain Feillu\t France\tVacansoleil-DCM\t27\tDNS-12\n202\tBorut Boic\t Slovenia\tVacansoleil-DCM\t30\t136\n203\tThomas De Gendt\t Belgium\tVacansoleil-DCM\t24*\t63\n204\tJohnny Hoogerland\t Netherlands\tVacansoleil-DCM\t28\t74\n205\tBjörn Leukemans\t Belgium\tVacansoleil-DCM\t34\tHD-19\n206\tMarco Marcato\t Italy\tVacansoleil-DCM\t27\t89\n207\tWout Poels\t Netherlands\tVacansoleil-DCM\t23*\tDNF-9\n208\tRob Ruijgh\t Netherlands\tVacansoleil-DCM\t24*\t21\n209\tLieuwe Westra\t Netherlands\tVacansoleil-DCM\t28\t128\n211\tJérôme Coppel\t France\tSaur-Sojasun\t24*\t14\n212\tArnaud Coyot\t France\tSaur-Sojasun\t30\t148\n213\tAnthony Delaplace\t France\tSaur-Sojasun\t21*\t135\n214\tJimmy Engoulvent\t France\tSaur-Sojasun\t31\t160\n215\tJérémie Galland\t 
France\tSaur-Sojasun\t28\t138\n216\tJonathan Hivert\t France\tSaur-Sojasun\t26\t97\n217\tFabrice Jeandesboz\t France\tSaur-Sojasun\t26\t124\n218\tLaurent Mangel\t France\tSaur-Sojasun\t30\t122\n219\tYannick Talabardon\t France\tSaur-Sojasun\t29\t47'''\n\ndef init_riders():\n\tlarge_delete_all( Rider )\n\n\ttdf = tdf.strip()\n\tlines = tdf.split( '\\n' )\n\twith transaction.atomic():\n\t\tfor count, line in enumerate(lines):\n\t\t\tif count % 20 != 0:\t# Only add every 20 riders.\n\t\t\t\tcontinue\n\t\t\tfields = line.split( '\\t' )\n\t\t\t\n\t\t\tfull_name = fields[1].strip()\n\t\t\tnames = full_name.split()\n\t\t\tfirst_name = names[0]\n\t\t\tlast_name = ' '.join( names[1:] )\n\t\t\t\t\n\t\t\tteam = fields[3]\n\t\t\tgender = 0\n\t\t\tyears_old = int(fields[4].strip('*'))\n\t\t\tdate_of_birth = datetime.date( 2012 - years_old, 3, 3 )\n\t\t\tlicense = years_old\n\t\t\t\n\t\t\tsafe_print( first_name, last_name, team, gender, date_of_birth, license )\n\t\t\tr = Rider( first_name=first_name.encode('iso-8859-1'), last_name=last_name.encode('iso-8859-1'), team=team.encode('iso-8859-1'), gender=gender, date_of_birth=date_of_birth, license=license )\n\t\t\tr.save()\n\n" }, { "alpha_fraction": 0.5875938534736633, "alphanum_fraction": 0.7277029156684875, "avg_line_length": 36.09923553466797, "blob_id": "9aaad451f7adf3e90b6105692e7039df08eef4ad", "content_id": "f793696af0cdbcf35f11f1c917c0f2f294d6ba6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9721, "license_type": "no_license", "max_line_length": 90, "num_lines": 262, "path": "/core/init_license_holders.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "ISO-8859-2", "text": "\nfrom django.db import transaction\nimport datetime\nimport string\nfrom models import *\nfrom large_delete_all import large_delete_all\nfrom utils import removeDiacritic\nfrom CountryIOC import uci_country_codes\n\ntdf = '''\n1\tChris Froome\t United Kingdom\tTeam Sky\t28\t1\n2\tEdvald Boasson Hagen\t Norway\tTeam Sky\t26\tDNS-13\n3\tPeter Kennaugh\t United Kingdom\tTeam Sky\t24\t77\n4\tVasil Kiryienka\t Belarus\tTeam Sky\t32\tHD-9\n5\tDavid López\t Spain\tTeam Sky\t32\t127\n6\tRichie Porte\t Australia\tTeam Sky\t28\t19\n7\tKanstantsin Sivtsov\t Belarus\tTeam Sky\t30\t90\n8\tIan Stannard\t United Kingdom\tTeam Sky\t26\t135\n9\tGeraint Thomas\t United Kingdom\tTeam Sky\t27\t140\n11\tGreen jersey Peter Sagan\t Slovakia\tCannondale\t23\t82\n12\tMaciej Bodnar\t Poland\tCannondale\t28\t114\n13\tAlessandro De Marchi\t Italy\tCannondale\t27\t71\n14\tTed King\t United States\tCannondale\t30\tHD-4\n15\tKristijan Koren\t Slovenia\tCannondale\t26\t100\n16\tAlan Marangoni\t Italy\tCannondale\t28\t111\n17\tMoreno Moser\t Italy\tCannondale\t22\t94\n18\tFabio Sabatini\t Italy\tCannondale\t28\t117\n19\tBrian Vandborg\t Denmark\tCannondale\t31\t155\n21\tJurgen Van Den Broeck\t Belgium\tLotto-Belisol\t30\tDNS-6\n22\tLars Bak\t Denmark\tLotto-Belisol\t33\t108\n23\tBart De Clercq\t Belgium\tLotto-Belisol\t26\t38\n24\tAndré Greipel\t Germany\tLotto-Belisol\t30\t129\n25\tAdam Hansen\t Australia\tLotto-Belisol\t32\t72\n26\tGreg Henderson\t New Zealand\tLotto-Belisol\t36\t162\n27\tJürgen Roelandts\t Belgium\tLotto-Belisol\t27\t160\n28\tMarcel Sieberg\t Germany\tLotto-Belisol\t31\tDNF-19\n29\tFrederik Willems\t Belgium\tLotto-Belisol\t33\t163\n31\tCadel Evans\t Australia\tBMC Racing Team\t36\t39\n32\tBrent Bookwalter\t United States\tBMC Racing Team\t29\t91\n33\tMarcus Burghardt\t Germany\tBMC Racing Team\t30\t98\n34\tPhilippe 
Gilbert\t Belgium\tBMC Racing Team\t30\t62\n35\tAmaël Moinard\t France\tBMC Racing Team\t31\t56\n36\tSteve Morabito\t Switzerland\tBMC Racing Team\t30\t35\n37\tManuel Quinziato\t Italy\tBMC Racing Team\t33\t85\n38\tMichael Schär\t Switzerland\tBMC Racing Team\t27\tDNS-9\n39\tTejay van Garderen\t United States\tBMC Racing Team\t24\t45\n41\tAndy Schleck\t Luxembourg\tRadioShack-Leopard\t28\t20\n42\tJan Bakelants\t Belgium\tRadioShack-Leopard\t27\t18\n43\tLaurent Didier\t Luxembourg\tRadioShack-Leopard\t28\t53\n44\tTony Gallopin\t France\tRadioShack-Leopard\t25\t58\n45\tMarkel Irizar\t Spain\tRadioShack-Leopard\t33\t103\n46\tAndreas Klöden\t Germany\tRadioShack-Leopard\t38\t30\n47\tMaxime Monfort\t Belgium\tRadioShack-Leopard\t30\t14\n48\tJens Voigt\t Germany\tRadioShack-Leopard\t41\t67\n49\tHaimar Zubeldia\t Spain\tRadioShack-Leopard\t36\t36\n51\tPierre Rolland\t France\tTeam Europcar\t26\t24\n52\tYukiya Arashiro\t Japan\tTeam Europcar\t28\t99\n53\tJérôme Cousin\t France\tTeam Europcar\t24\t156\n54\tCyril Gautier\t France\tTeam Europcar\t25\t32\n55\tYohann Gčne\t France\tTeam Europcar\t31\t158\n56\tDavide Malacarne\t Italy\tTeam Europcar\t25\t49\n57\tKévin Reza\t France\tTeam Europcar\t25\t134\n58\tDavid Veilleux\t Canada\tTeam Europcar\t25\t123\n59\tThomas Voeckler\t France\tTeam Europcar\t34\t65\n61\tJanez Brajkovic\t Slovenia\tAstana\t29\tDNS-7\n62\tAssan Bazayev\t Kazakhstan\tAstana\t32\t168\n63\tJakob Fuglsang\t Denmark\tAstana\t28\t7\n64\tEnrico Gasparotto\t Italy\tAstana\t31\t95\n65\tFrancesco Gavazzi\t Italy\tAstana\t28\t84\n66\tAndrey Kashechkin\t Kazakhstan\tAstana\t33\tDNF-3\n67\tFredrik Kessiakoff\t Sweden\tAstana\t33\tDNF-6\n68\tAlexey Lutsenko\t Kazakhstan\tAstana\t20\tDNF-18\n69\tDimitry Muravyev\t Kazakhstan\tAstana\t33\t167\n71\tThibaut Pinot\t France\tFDJ.fr\t23\tDNS-16\n72\tWilliam Bonnet\t France\tFDJ.fr\t31\tDNF-18\n73\tNacer Bouhanni\t France\tFDJ.fr\t22\tDNF-6\n74\tPierrick Fédrigo\t France\tFDJ.fr\t34\t59\n75\tMurilo Fischer\t Brazil\tFDJ.fr\t34\t133\n76\tAlexandre Geniez\t France\tFDJ.fr\t25\t44\n77\tArnold Jeannesson\t France\tFDJ.fr\t27\t29\n78\tJérémy Roy\t France\tFDJ.fr\t30\t126\n79\tArthur Vichot\t France\tFDJ.fr\t24\t66\n81\tJean-Christophe Péraud\t France\tAg2r-La Mondiale\t36\tDNF-17\n82\tRomain Bardet\t France\tAg2r-La Mondiale\t22\t15\n83\tMaxime Bouet\t France\tAg2r-La Mondiale\t26\tDNS-6\n84\tSamuel Dumoulin\t France\tAg2r-La Mondiale\t32\t143\n85\tHubert Dupont\t France\tAg2r-La Mondiale\t32\t34\n86\tJohn Gadret\t France\tAg2r-La Mondiale\t34\t22\n87\tBlel Kadri\t France\tAg2r-La Mondiale\t26\t125\n88\tSébastien Minard\t France\tAg2r-La Mondiale\t31\t124\n89\tChristophe Riblon\t France\tAg2r-La Mondiale\t32\t37\n91\tAlberto Contador\t Spain\tTeam Saxo-Tinkoff\t30\t4\n92\tDaniele Bennati\t Italy\tTeam Saxo-Tinkoff\t32\t107\n93\tJesús Hernández\t Spain\tTeam Saxo-Tinkoff\t31\t43\n94\tRoman Kreuziger\t Czech Republic\tTeam Saxo-Tinkoff\t27\t5\n95\tBenjamín Noval\t Spain\tTeam Saxo-Tinkoff\t34\tDNF-9\n96\tSérgio Paulinho\t Portugal\tTeam Saxo-Tinkoff\t33\t136\n97\tNicolas Roche\t Ireland\tTeam Saxo-Tinkoff\t28\t40\n98\tMichael Rogers\t Australia\tTeam Saxo-Tinkoff\t33\t16\n99\tMatteo Tosatto\t Italy\tTeam Saxo-Tinkoff\t39\t92\n101\tJoaquim Rodríguez\t Spain\tTeam Katusha\t34\t3\n102\tPavel Brutt\t Russia\tTeam Katusha\t31\t110\n103\tAlexander Kristoff\t Norway\tTeam Katusha\t25\t147\n104\tAleksandr Kuschynski\t Belarus\tTeam Katusha\t33\t141\n105\tAlberto Losada\t Spain\tTeam Katusha\t31\t109\n106\tDaniel Moreno\t Spain\tTeam Katusha\t31\t17\n107\tGatis 
Smukulis\t Latvia\tTeam Katusha\t26\t119\n108\tYuri Trofimov\t Russia\tTeam Katusha\t29\t51\n109\tEduard Vorganov\t Russia\tTeam Katusha\t30\t48\n111\tIgor Antón\t Spain\tEuskaltel-Euskadi\t30\t69\n112\tMikel Astarloza\t Spain\tEuskaltel-Euskadi\t33\t42\n113\tGorka Izagirre\t Spain\tEuskaltel-Euskadi\t25\tDNS-17\n114\tJon Izagirre\t Spain\tEuskaltel-Euskadi\t24\t23\n115\tJuan José Lobato\t Spain\tEuskaltel-Euskadi\t24\t78\n116\tMikel Nieve\t Spain\tEuskaltel-Euskadi\t29\t12\n117\tJuan José Oroz\t Spain\tEuskaltel-Euskadi\t32\t165\n118\tRubén Pérez\t Spain\tEuskaltel-Euskadi\t31\t139\n119\tRomain Sicard\t France\tEuskaltel-Euskadi\t25\t122\n121\tAlejandro Valverde\t Spain\tMovistar Team\t33\t8\n122\tAndrey Amador\t Costa Rica\tMovistar Team\t26\t54\n123\tJonathan Castroviejo\t Spain\tMovistar Team\t26\t97\n124\tRui Costa\t Portugal\tMovistar Team\t26\t27\n125\tImanol Erviti\t Spain\tMovistar Team\t29\t118\n126\tJosé Iván Gutiérrez\t Spain\tMovistar Team\t34\tDNF-9\n127\tRubén Plaza\t Spain\tMovistar Team\t33\t47\n128\tNairo Quintana\t Colombia\tMovistar Team\t23\t2\n129\tJosé Joaquín Rojas\t Spain\tMovistar Team\t28\t79\n131\tRein Taaramäe\t Estonia\tCofidis\t26\t102\n132\tYoann Bagot\t France\tCofidis\t25\tDNF-3\n133\tJérôme Coppel\t France\tCofidis\t26\t63\n134\tEgoitz García\t Spain\tCofidis\t27\t115\n135\tChristophe Le Mével\t France\tCofidis\t32\tDNF-19\n136\tGuillaume Levarlet\t France\tCofidis\t27\t61\n137\tLuis Ángel Maté\t Spain\tCofidis\t29\t88\n138\tRudy Molard\t France\tCofidis\t23\t73\n139\tDaniel Navarro\t Spain\tCofidis\t29\t9\n141\tDamiano Cunego\t Italy\tLampre-Merida\t31\t55\n142\tMatteo Bono\t Italy\tLampre-Merida\t29\tDNF-8\n143\tDavide Cimolai\t Italy\tLampre-Merida\t23\t137\n144\tElia Favilli\t Italy\tLampre-Merida\t24\t128\n145\tRoberto Ferrari\t Italy\tLampre-Merida\t30\t157\n146\tAdriano Malori\t Italy\tLampre-Merida\t25\tDNF-7\n147\tManuele Mori\t Italy\tLampre-Merida\t32\t76\n148\tPrzemyslaw Niemiec\t Poland\tLampre-Merida\t33\t57\n149\tJosé Serpa\t Colombia\tLampre-Merida\t34\t21\n151\tMark Cavendish\t United Kingdom\tOmega Pharma-Quick Step\t28\t148\n152\tSylvain Chavanel\t France\tOmega Pharma-Quick Step\t34\t31\n153\tMichal Kwiatkowski\t Poland\tOmega Pharma-Quick Step\t23\t11\n154\tTony Martin\t Germany\tOmega Pharma-Quick Step\t28\t106\n155\tJérôme Pineau\t France\tOmega Pharma-Quick Step\t33\t159\n156\tGert Steegmans\t Belgium\tOmega Pharma-Quick Step\t32\t153\n157\tNiki Terpstra\t Netherlands\tOmega Pharma-Quick Step\t29\t149\n158\tMatteo Trentin\t Italy\tOmega Pharma-Quick Step\t23\t142\n159\tPeter Velits\t Slovakia\tOmega Pharma-Quick Step\t28\t25\n161\tLars Boom\t Netherlands\tBelkin Pro Cycling\t27\t105\n162\tRobert Gesink\t Netherlands\tBelkin Pro Cycling\t26\t26\n163\tTom Leezer\t Netherlands\tBelkin Pro Cycling\t27\t150\n164\tBauke Mollema\t Netherlands\tBelkin Pro Cycling\t26\t6\n165\tLars Petter Nordhaug\t Norway\tBelkin Pro Cycling\t28\t50\n166\tBram Tankink\t Netherlands\tBelkin Pro Cycling\t34\t64\n167\tLaurens ten Dam\t Netherlands\tBelkin Pro Cycling\t32\t13\n168\tSep Vanmarcke\t Belgium\tBelkin Pro Cycling\t24\t131\n169\tMaarten Wynants\t Belgium\tBelkin Pro Cycling\t30\t132\n171\tRyder Hesjedal\t Canada\tGarmin-Sharp\t32\t70\n172\tJack Bauer\t New Zealand\tGarmin-Sharp\t28\tDNF-19\n173\tTom Danielson\t United States\tGarmin-Sharp\t35\t60\n174\tRohan Dennis\t Australia\tGarmin-Sharp\t23\tDNS-9\n175\tDaniel Martin\t Ireland\tGarmin-Sharp\t26\t33\n176\tDavid Millar\t United Kingdom\tGarmin-Sharp\t36\t113\n177\tRamunas Navardauskas\t 
Lithuania\tGarmin-Sharp\t25\t120\n178\tAndrew Talansky\t United States\tGarmin-Sharp\t24\t10\n179\tChristian Vande Velde\t United States\tGarmin-Sharp\t37\tDNF-7\n181\tSimon Gerrans\t Australia\tOrica-GreenEDGE\t33\t80\n182\tMichael Albasini\t Switzerland\tOrica-GreenEDGE\t32\t86\n183\tSimon Clarke\t Australia\tOrica-GreenEDGE\t26\t68\n184\tMatthew Goss\t Australia\tOrica-GreenEDGE\t26\t152\n185\tDaryl Impey\t South Africa\tOrica-GreenEDGE\t28\t74\n186\tBrett Lancaster\t Australia\tOrica-GreenEDGE\t33\t154\n187\tCameron Meyer\t Australia\tOrica-GreenEDGE\t25\t130\n188\tStuart O'Grady\t Australia\tOrica-GreenEDGE\t39\t161\n189\tSvein Tuft\t Canada\tOrica-GreenEDGE\t36\t169\n191\tJohn Degenkolb\t Germany\tArgos-Shimano\t24\t121\n192\tRoy Curvers\t Netherlands\tArgos-Shimano\t33\t145\n193\tKoen de Kort\t Netherlands\tArgos-Shimano\t30\t138\n194\tTom Dumoulin\t Netherlands\tArgos-Shimano\t22\t41\n195\tJohannes Fröhlinger\t Germany\tArgos-Shimano\t28\t146\n196\tSimon Geschke\t Germany\tArgos-Shimano\t27\t75\n197\tMarcel Kittel\t Germany\tArgos-Shimano\t25\t166\n198\tAlbert Timmer\t Netherlands\tArgos-Shimano\t28\t164\n199\tTom Veelers\t Netherlands\tArgos-Shimano\t28\tDNF-19\n201\tWout Poels\t Netherlands\tVacansoleil-DCM\t25\t28\n202\tKris Boeckmans\t Belgium\tVacansoleil-DCM\t26\tDNF-19\n203\tThomas De Gendt\t Belgium\tVacansoleil-DCM\t26\t96\n204\tJuan Antonio Flecha\t Spain\tVacansoleil-DCM\t35\t93\n205\tJohnny Hoogerland\t Netherlands\tVacansoleil-DCM\t30\t101\n206\tSergey Lagutin\t Uzbekistan\tVacansoleil-DCM\t32\t83\n207\tBoy van Poppel\t Netherlands\tVacansoleil-DCM\t25\t144\n208\tDanny van Poppel\t Netherlands\tVacansoleil-DCM\t19\tDNS-16\n209\tLieuwe Westra\t Netherlands\tVacansoleil-DCM\t30\tDNF-21\n211\tBrice Feillu\t France\tSojasun\t27\t104\n212\tAnthony Delaplace\t France\tSojasun\t23\t89\n213\tJulien El Fares\t France\tSojasun\t28\t81\n214\tJonathan Hivert\t France\tSojasun\t28\t151\n215\tCyril Lemoine\t France\tSojasun\t30\t112\n216\tJean-Marc Marino\t France\tSojasun\t29\t116\n217\tMaxime Méderel\t France\tSojasun\t32\t52\n218\tJulien Simon\t France\tSojasun\t27\t87\n219\tAlexis Vuillermoz\t France\tSojasun\t25\t46\n'''\n\ndef init_license_holders():\n\tglobal tdf\n\t\n\tlarge_delete_all( LicenseHolder )\n\tlarge_delete_all( Team )\n\n\ttdf = tdf.decode('iso-8859-1').strip()\n\tlines = tdf.split( '\\n' )\n\t\n\[email protected]\n\tdef process_records( lines ):\n\t\tfor count, line in enumerate(lines):\n\t\t\tfields = line.split( '\\t' )\n\t\t\t\n\t\t\tfull_name = fields[1].strip()\n\t\t\tnames = full_name.split()\n\t\t\tfirst_name = names[0]\n\t\t\tlast_name = ' '.join( names[1:] )\n\t\t\t\n\t\t\tteam = fields[3].strip()\n\t\t\tgender = 0\n\t\t\tyears_old = int(fields[4].strip('*'))\n\t\t\tdate_of_birth = datetime.date( 2013 - years_old, 3, 3 )\n\t\t\t\n\t\t\tnationality = fields[2].strip()\n\t\t\tnationality_code = uci_country_codes.get( nationality.upper(), 'CAN' )\n\t\t\t\n\t\t\tuci_code = nationality_code + date_of_birth.strftime( '%Y%m%d' )\n\t\t\t\n\t\t\tsafe_print( first_name, last_name, gender, date_of_birth, nationality, uci_code, team )\n\t\t\tfields = {\n\t\t\t\t'first_name': first_name,\n\t\t\t\t'last_name': last_name,\n\t\t\t\t'gender': gender,\n\t\t\t\t'nationality': nationality,\n\t\t\t\t'date_of_birth': date_of_birth,\n\t\t\t\t'uci_code': uci_code,\n\t\t\t\t'license_code': u'{}'.format(count+1),\n\t\t\t}\n\t\t\tif not LicenseHolder.objects.filter(**fields).exists():\n\t\t\t\tLicenseHolder(**fields).save()\n\t\t\t\n\t\t\tif not Team.objects.filter(name = 
team).exists():\n\t\t\t\tTeam(\n\t\t\t\t\tname = team,\n\t\t\t\t\tteam_code = team[:3],\n\t\t\t\t\tteam_type = 7,\n\t\t\t\t).save()\n\t\t\t\t\n\tprocess_records( lines )\n\nif __name__ == '__main__':\n\tinit_license_holders()\n" }, { "alpha_fraction": 0.3684210479259491, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 18, "blob_id": "efd1705661b276db1a39f811531f2acdf1225c2f", "content_id": "893181b9089288503ab4f6a2338511a93aa71eac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/helptxt/version.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "UTF-8", "text": "version = \"3.0.18\"\n" }, { "alpha_fraction": 0.6482807993888855, "alphanum_fraction": 0.65329509973526, "avg_line_length": 18.94285774230957, "blob_id": "ade6674ccf29e0ea739bf3c3a37dd0d18767a766", "content_id": "5e21cb99cc12e932af117df0c94bc96a1908c6de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1396, "license_type": "no_license", "max_line_length": 77, "num_lines": 70, "path": "/dependencies.py", "repo_name": "pythonthings/RaceDB", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os\nimport sys\nimport shutil\nimport argparse\nimport subprocess\nimport platform\n\nis_windows = (platform.system() == 'Windows')\n\npyllrp = 'pip-install-pyllrp-3.0.0.zip'\n\ndependencies = [\n\t'django==2.2.1',\n\t'django-crispy-forms',\n\t'django-extensions',\n\t'requests',\n\t'dj_static',\n\t'waitress',\n\t'xlsxwriter',\n\t'xlrd',\n\t'pytz',\n\t'tzlocal',\n\t'fpdf',\n\tpyllrp,\n]\n\nuninstall_dependencies = [\n\t#'south',\n]\n\ndef update_dependencies( upgrade ):\n\tprint( 'Updating Dependencies...' )\n\t\n\tpy = sys.executable\n\t\n\tfor d in dependencies:\n\t\targs = [py, '-m', 'pip', 'install', d]\n\t\tif upgrade:\n\t\t\targs.append('--upgrade')\n\t\tprint( ' '.join(args) )\n\t\tsubprocess.call( args )\n\n\tfor d in uninstall_dependencies:\n\t\targs = [py, '-m', 'pip', 'uninstall', d]\n\t\tprint( ' '.join(args) )\n\t\tsubprocess.call( args )\n\n\tprint( 'Removing old compiled files...' )\n\tfor root, dirs, files in os.walk( '.' ):\n\t\tfor f in files:\n\t\t\tfname = os.path.join( root, f )\n\t\t\tif os.path.splitext(fname)[1] == '.pyc':\n\t\t\t\tos.remove( fname )\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser( description='Update RaceDB Dependencies' )\n\tparser.add_argument(\n\t\t'--upgrade',\n\t\taction='store_true',\n\t\tdefault=False,\n\t)\n\t\n\targs = parser.parse_args()\n\tupdate_dependencies( args.upgrade )\n\t\n\tif is_windows:\n\t\tprint( 'Creating Windows desktop shortcut...' )\n\t\timport CreateShortcut\n\t\tCreateShortcut.CreateShortcut()\n" } ]
6
vinaybana/djangoapp
https://github.com/vinaybana/djangoapp
eaaf6486d05dbd2719ed90016482a74301d40458
a96ae9047a7eeba63dec8a8338b0fcfc548804b7
ca54e83da8460bfc6a00f0ce7732a53ec6538e7f
refs/heads/master
2023-06-15T20:20:38.014653
2021-06-22T12:39:53
2021-06-22T12:39:53
288,992,634
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6829919815063477, "alphanum_fraction": 0.6829919815063477, "avg_line_length": 25.13953399658203, "blob_id": "81b5f68147a08e52061ef6527b361db09ef3da78", "content_id": "d5cc35b26e97ee395773c263737515575b75c256", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1123, "license_type": "no_license", "max_line_length": 88, "num_lines": 43, "path": "/blog/serializers.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from .models import Post,Category,Tag,Comment\nfrom rest_framework import serializers\nimport json\n\nclass PostSerializer(serializers.ModelSerializer):\n\tcomments = serializers.SerializerMethodField()\n\n\tdef get_comments(self,obj):\n\t\tcomments = Comment.objects.filter(post = obj.id).all()\n\t\tdata = []\n\t\tfor cmnt in comments:\n\t\t\tdata.append(\n\t\t\t\t{\n\t\t\t\t\t'post':obj.id,\n\t\t\t\t\t'name':cmnt.name,\n\t\t\t\t\t'text': cmnt.text,\n\t\t\t\t\t'created':cmnt.created,\n\t\t\t\t\t'updated':cmnt.updated,\n\t\t\t\t\t'active':cmnt.active,\n\t\t\t\t\t'parent': cmnt.parent\n\t\t\t\t})\n\t\t\t# sent = json.dumps(data)\n\t\treturn data\n\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = ['id','title', 'text', 'author', 'created_date', 'published_date','comments']\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Category\n\t\tfields = ['id','title', 'text', 'parent', 'created_date']\n\nclass TagSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Tag\n\t\tfields = ['id','title', 'text','created_date']\n\nclass CommentSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Comment\n\t\tfields = ['post','name', 'text','created','updated','active','parent']" }, { "alpha_fraction": 0.6365914940834045, "alphanum_fraction": 0.6365914940834045, "avg_line_length": 38.900001525878906, "blob_id": "96e40ba40335f497803bfcdca175764d5b2301d5", "content_id": "ddd3b62582ad0822c5476e2ff7d1aef97ff5f820", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1596, "license_type": "no_license", "max_line_length": 81, "num_lines": 40, "path": "/blog/urls.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from django.contrib import admin\t\nfrom django.urls import path\nfrom .import views\n\n\napp_name = 'blog'\n\n# urlpatterns = [\n# \tpath('post/<int:pk>/edit/', views.post_edit, name='post_edit'),\n# \tpath('post/new/', views.post_new, name='post_new'),\n# \tpath('blog/sign_up/',views.sign_up,name=\"sign-up\"),\n# \tpath('blog/logout/',views.logout, name=\"logout\"),\n# \tpath('post/<int:pk>/', views.post_detail, name='post_detail'),\n# \tpath('login/',views.login, name=\"login\"),\n# path('', views.post_list, name ='post_list'),\n# path('userdetail/<int:pk>/',views.userdetail, name='userdetail'),\n# path('edituser/<int:pk>/', views.edit_profile, name='edit_profile'),\n\n\n \n \n# ]\nurlpatterns = [\n path('post/<str:slug>/edit/', views.post_edit, name='post_edit'),\n path('category/<str:slug>/edit/', views.category_edit, name='category_edit'),\n path('blog/sign_up/',views.sign_up,name=\"sign-up\"),\n\tpath('blog/logout/',views.logout, name=\"logout\"),\n path('post/new/', views.post_new, name='post_new'),\n path('userdetail/<int:pk>/',views.userdetail, name='userdetail'),\n path('edituser/<int:pk>/', views.edit_profile, name='edit_profile'),\n path('post/<str:slug>/', views.post_detail, name='post_detail'),\n path('category/<str:slug>/', 
views.category_detail, name='category_detail'),\n path('tag/<str:slug>/', views.tag_details, name='tag_details'),\n path('category/', views.category_list, name ='category_list'),\n path('tag/', views.tag_list, name ='tag_list'),\n path('login/',views.login, name=\"login\"),\n path('', views.post_list, name ='post_list'),\n\n \n]\n" }, { "alpha_fraction": 0.7023349404335022, "alphanum_fraction": 0.7065099477767944, "avg_line_length": 30.086538314819336, "blob_id": "544d8f33423ba262d7a623503c3f22ce229e6fc6", "content_id": "d434b94426b1aac204c8a0cd54399f3a960f566d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6467, "license_type": "no_license", "max_line_length": 100, "num_lines": 208, "path": "/blog/views.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, get_object_or_404, redirect\nfrom django.utils import timezone\nfrom .models import Post, Userprofile, Comment,Category,Tag\nfrom .forms import PostForm,ProfileForm, categoryForm,CommentForm\nfrom django.contrib.auth.models import User, auth\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom rest_framework import viewsets\nfrom rest_framework import permissions\nfrom .serializers import PostSerializer\nfrom rest_framework import generics\nfrom django.contrib.auth.decorators import login_required\n\ndef category_list(request):\n\tcategories=Category.objects.all()\n\treturn render(request, 'blog/category_list.html', {'categories':categories})\n\n\n\t\n\ndef category_detail(request,slug):\n\tcategory = get_object_or_404(Category, slug=slug)\n\tprint(category)\n\tposts= Post.objects.filter(category=category)\n\tprint(posts)\n\treturn render(request, 'blog/category_detail.html', {'posts': posts, 'category':category})\n\ndef category_edit(request, slug):\n\tcategory = get_object_or_404(Category, slug=slug)\n\tif request.method == \"POST\":\n\t\tform = CategoryForm(request.POST, instance=category)\n\t\tif form.is_valid():\n\t\t\tcategory = form.save(commit=False)\n\t\t\tcategory.created_date = timezone.now()\n\t\t\tcategory.save()\n\t\t\treturn redirect('blog:category_detail', slug=category.slug)\n\telse:\n\t\tform = CategoryForm(instance=category)\n\treturn render(request, 'blog/category_edit.html', {'form': form})\n\ndef post_list(request):\n\tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n\treturn render(request, 'blog/post_list.html', {'posts': posts})\n\ndef post_detail(request, slug):\n\tpost = get_object_or_404(Post, slug=slug)\n\tcomments=Comment.objects.filter(post=post, parent=None)\n\tnew_comment=None\n\tif request.method == \"POST\":\n\t\tform = CommentForm(request.POST)\n\t\tif form.is_valid():\t\n\t\t\ttext= request.POST.get('text')\n\t\t\tname= request.POST.get('name')\n\t\t\treply_id=request.POST.get('comment_id')\n\t\t\tcomment_obj=None\n\t\t\tif reply_id:\n\t\t\t\tcomment_obj=Comment.objects.get(id=reply_id)\n\t\t\t\n\t\t\tnew_comment = Comment.objects.create(post=post, parent=comment_obj, text=text, name=name)\n\t\t\tnew_comment.save()\n\t\t\treturn redirect('blog:post_detail', slug=post.slug)\n\n\telse:\n\t\tform = CommentForm()\n\treturn render(request, 'blog/post_details.html', {'form':form , 'post': post, 'comments':comments})\n\ndef post_new(request):\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tpost = 
form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('blog:post_list')\n\t\t\n\telse:\n\t\tform = PostForm()\n\treturn render(request, 'blog/post_edit.html', {'form': form})\n\ndef post_edit(request, slug):\n\tpost = get_object_or_404(Post, slug=slug)\n\tif request.method == \"POST\":\n\t\tform = PostForm(request.POST, instance=post)\n\t\tif form.is_valid():\n\t\t\tpost = form.save(commit=False)\n\t\t\tpost.author = request.user\n\t\t\tpost.published_date = timezone.now()\n\t\t\tpost.save()\n\t\t\treturn redirect('blog:post_detail', slug=post.slug)\n\telse:\n\t\tform = PostForm(instance=post)\n\treturn render(request, 'blog/post_edit.html', {'form': form})\n\ndef tag_list(request):\n\ttags= Tag.objects.all()\n\tprint(tags)\n\treturn render(request, 'blog/tag_list.html', {'tags':tags})\n\n\ndef tag_details(request, slug):\n\ttag = get_object_or_404(Tag, slug=slug)\n\tprint(tag)\n\tposts=Post.objects.filter(tag__slug=tag)\n\treturn render(request, 'blog/tag_details.html', {'tag':tag, 'posts': posts})\n\n\ndef cmnt(request, slug):\n\tpost= get_object_or_404(Post, slug=slug)\n\tcmnt= Post.cmnt.filter(slug=post.slug)\n\treturn cmnt\n\n\n\n# def post_list(request):\n# \tposts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')\n# \treturn render(request, 'blog/post_list.html', {'posts':posts})\n\n# def post_detail(request, pk):\n# \tpost = get_object_or_404(Post, pk=pk)\n# \treturn render(request, 'blog/post_details.html', {'post': post})\n\n# def post_new(request):\n# \tif request.method == \"POST\":\n# \t\tform = PostForm(request.POST)\n# \t\tif form.is_valid():\n# \t\t\tpost = form.save(commit=False)\n# \t\t\tpost.author = request.user\n# \t\t\tpost.published_date = timezone.now()\n# \t\t\tpost.save()\n# \t\t\treturn redirect('blog:post_detail', pk=post.pk)\n# \telse:\n# \t\tform = PostForm()\n# \treturn render(request, 'blog/post_edit.html', {'form': form})\n\n# def post_edit(request, pk):\n# \tpost = get_object_or_404(Post, pk=pk)\n# \tif request.method == \"POST\":\n# \t\tform = PostForm(request.POST, instance=post)\n# \t\tif form.is_valid():\n# \t\t\tpost = form.save(commit=False)\n# \t\t\tpost.author = request.user\n# \t\t\tpost.published_date = timezone.now()\n# \t\t\tpost.save()\n# \t\t\treturn redirect('blog:post_detail', pk=post.pk)\n# \telse:\n# \t\tform = PostForm(instance=post)\n# \treturn render(request, 'blog/post_edit.html', {'form': form})\n\n\ndef sign_up(request): \n\tcontext = {}\n\tform = UserCreationForm(request.POST or None)\n\tif request.method == \"POST\":\n\t\tif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tauth.login(request, user)\n\t\t\treturn render(request,'blog/post_list.html')\n\tcontext['form']=form\n\treturn render(request,'blog/sign_up.html',context)\n\ndef login(request):\n\n\tif request.method == \"POST\":\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\t\tuser = auth.authenticate(username=username, password=password)\n\t\tif user is not None:\n\t\t\tauth.login(request, user)\n\t\t\treturn render(request, 'blog/post_list.html')\n\telse:\n\t\treturn render(request, 'blog/login.html')\n\ndef logout(request):\n\tlogout(request)\n\treturn render(request, 'blog/post_list.html')\n\n@login_required\ndef userdetail(request, pk):\n\tuser = User.objects.get(pk=pk)\n\tuserimg= Userprofile.objects.filter(user= request.user).first()\n\treturn render(request, 'blog/userdetail.html',{'user':user, 
'userimg':userimg})\n\n\ndef edit_profile(request, pk):\n\tuser = User.objects.get(pk=pk)\n\tif request.method == 'POST':\n\t\tform = ProfileForm(request.POST, request.FILES)\n\n\t\tif form.is_valid():\n\t\t\tis_exist = Userprofile.objects.filter(user=request.user).first()\n\t\t\tif is_exist:\n\t\t\t\tis_exist.user_image= form.cleaned_data.get('user_image')\n\t\t\t\tis_exist.user = request.user\n\t\t\t\tis_exist.save()\n\t\t\telse:\n\t\t\t\tuserimg = form.save(commit=False)\n\t\t\t\tuserimg.user = request.user\n\t\t\t\tuserimg.save()\n\t\t\treturn redirect('blog:userdetail',pk=user.pk)\n\telse:\n\t\tform = ProfileForm()\n\treturn render(request,'blog/useredit.html', {'form':form})\n\n \n\n\n# Redirect to a success page.\n\n" }, { "alpha_fraction": 0.6968749761581421, "alphanum_fraction": 0.6968749761581421, "avg_line_length": 24.559999465942383, "blob_id": "461b04b2f46c599a2974e0ae959ffaabc30a0c2c", "content_id": "cfbcf66a500342e796654e6afad891abee918883", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 640, "license_type": "no_license", "max_line_length": 58, "num_lines": 25, "path": "/polls/serializers.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from .models import Question,Choice\nfrom rest_framework import serializers\n\nclass QuestionSerializer(serializers.ModelSerializer):\n\tchoices = serializers.SerializerMethodField()\n\n\tdef get_choices(self, obj):\n\t\tchoices = Choice.objects.filter(question = obj.id).all()\n\t\tdata = []\n\t\tfor opt in choices:\n\t\t\tdata.append(\n\t\t\t\t{\n\t\t\t\t\t'id': opt.id,\n\t\t\t\t\t'choice_text': opt.choice_text,\n\t\t\t\t\t'score': opt.votes\n\t\t\t\t})\n\t\treturn data\n\tclass Meta:\n\t\tmodel = Question\n\t\tfields = ['question_text', 'pub_date','choices']\n\nclass ChoiceSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Choice\n\t\tfields = ['question', 'choice_text', 'votes']\n\n" }, { "alpha_fraction": 0.7982954382896423, "alphanum_fraction": 0.7982954382896423, "avg_line_length": 38.16666793823242, "blob_id": "b7a67b1b994f8d94e3d9f7d487a6e42bdfa85bf4", "content_id": "7b61e975aea57b7d84500258f8635296fc3519b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 89, "num_lines": 18, "path": "/polls/api.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from rest_framework import serializers, viewsets, status as status_code, generics, mixins\nfrom .serializers import *\nfrom .models import *\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom django.contrib.auth import authenticate\nfrom rest_framework.response import Response\n\nclass QuestionViewSet(viewsets.ModelViewSet):\n\tserializer_class = QuestionSerializer\n\tqueryset = Question.objects.all()\n\thttp_method_names = ['get','post','put','patch']\n\nclass ChoiceViewSet(viewsets.ModelViewSet):\n\tserializer_class = ChoiceSerializer\n\tqueryset = Choice.objects.all()\n\thttp_method_names = ['get','post','put','patch']" }, { "alpha_fraction": 0.781931459903717, "alphanum_fraction": 0.781931459903717, "avg_line_length": 25.83333396911621, "blob_id": "9cdbdc1df0c075b5d50c57cfe2a032050e925226", "content_id": "fa82879ed9328ef2fee396dd303cf2254a11cc7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 321, 
"license_type": "no_license", "max_line_length": 49, "num_lines": 12, "path": "/polls/apiurls.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from rest_framework import renderers\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .import api, views\n\nrouter = DefaultRouter()\nrouter.register('question', api.QuestionViewSet),\nrouter.register('choice', api.ChoiceViewSet),\n\nurlpatterns = [\n\tpath('', include(router.urls)),\n]" }, { "alpha_fraction": 0.7797029614448547, "alphanum_fraction": 0.7797029614448547, "avg_line_length": 24.3125, "blob_id": "084052d50c3ff0e22dc9bcc3167eb7ce2f37d680", "content_id": "f99b1d535fc82d79f31c9de377d9b445785f76ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/blog/apiurls.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from rest_framework import renderers\nfrom django.urls import path, include\nfrom rest_framework.routers import DefaultRouter\nfrom .import api, views\n\n\nrouter = DefaultRouter()\nrouter.register('post', api.PostViewSet)\nrouter.register('category', api.CategoryViewSet)\nrouter.register('tag', api.TagViewSet)\nrouter.register('comment', api.CommentViewSet)\n\nurlpatterns = [\n\t\n\tpath('', include(router.urls)),\n]" }, { "alpha_fraction": 0.6299540996551514, "alphanum_fraction": 0.6299540996551514, "avg_line_length": 27.535715103149414, "blob_id": "9408da44d29f429f5ecb71478f3c747b89463e83", "content_id": "6b794673402cfdbba58699be5cd5750ec54d892a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 84, "num_lines": 84, "path": "/blog/admin.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Post,Userprofile, Category, Tag, Comment\n\n# class postAdmin(admin.ModelAdmin):\n\t\n# \tview_on_site = True\n# \tfieldsets = [\n# \t\t(None, {'fields': ['title', 'text', 'author']}),\n# \t\t('Date information', {'fields': ['published_date'], 'classes': ['collapse']}),\n\n# \t]\n\t\n# \tlist_display = ('title', 'author', 'published_date', 'created_date')\n# \tlist_filter = ['published_date']\n# \tsearch_fields = ['title']\n\nclass PostAdmin(admin.ModelAdmin):\n\t\t\n\tview_on_site = True\n\tfieldsets = [\n\t\t(None, {'fields': ['title','slug', 'text', 'author', 'tag', 'category', 'cmnt']}),\n\t\t('Date information', {'fields': ['published_date'], 'classes': ['collapse']}),\n\n\t]\n\t\n\tlist_display = ('title', 'author', 'slug','published_date', 'created_date')\n\tlist_filter = ['published_date']\n\tsearch_fields = ['title']\n\tfilter_horizontal = ('tag',)\n\tprepopulated_fields = {'slug':(\"title\",)}\n\t# readonly_fields=('slug',)\n\n\nclass CategoryAdmin(admin.ModelAdmin):\n\t\t\n\t# view_on_site = True\n\tfieldsets = [\n\t\t(None, {'fields': ['title','text','slug','parent']}),\n\t\t('Date information', {'fields': ['created_date'], 'classes': ['collapse']}),\n\n\t]\n\t\n\tlist_display = ('title', 'text','slug','parent', 'created_date')\n\t# list_filter = ['published_date']\n\tsearch_fields = ['title']\n\t# filter_horizontal = ('tag',)\n\tprepopulated_fields = {'slug':(\"title\",)}\n\nclass TagAdmin(admin.ModelAdmin):\n\t\t\n\t# view_on_site = True\n\tfieldsets = [\n\t\t(None, {'fields': ['title','text','slug']}),\n\t\t('Date 
information', {'fields': ['created_date'], 'classes': ['collapse']}),\n\n\t]\n\t\n\tlist_display = ('title', 'text','slug','created_date')\n\t# list_filter = ['published_date']\n\tsearch_fields = ['title']\n\t# filter_horizontal = ('tag',)\n\tprepopulated_fields = {'slug':(\"title\",)}\n\nclass CommentAdmin(admin.ModelAdmin):\n\t\t\n\t# view_on_site = True\n\tfieldsets = [\n\t\t(None, {'fields': ['post','name','text','slug','active','parent']}),\n\t\t('Date information', {'fields': ['created'], 'classes': ['collapse']}),\n\n\t]\n\t\n\tlist_display = ('name', 'text','post','slug','created','updated')\n\t# list_filter = ['published_date']\n\tsearch_fields = ['name']\n\t# filter_horizontal = ('tag',)\n\tprepopulated_fields = {'slug':(\"name\",)}\n\n\nadmin.site.register(Post,PostAdmin)\nadmin.site.register(Userprofile)\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Tag,TagAdmin)\nadmin.site.register(Comment,CommentAdmin)\n" }, { "alpha_fraction": 0.7576953172683716, "alphanum_fraction": 0.7576953172683716, "avg_line_length": 37.42424392700195, "blob_id": "e5735dfdad78a361de5b267b809e04e802f942f9", "content_id": "30b10da9426ae63a7cb489e98ed44ada5fe1d4f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1267, "license_type": "no_license", "max_line_length": 89, "num_lines": 33, "path": "/blog/api.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from rest_framework import serializers, viewsets, status as status_code, generics, mixins\nfrom .serializers import *\nfrom .models import *\nfrom rest_framework.views import APIView\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated, AllowAny\nfrom django.contrib.auth import authenticate\nfrom rest_framework.response import Response\nfrom rest_framework import pagination\n\nclass PostViewSet(viewsets.ModelViewSet\t):\n \"\"\"\n API endpoint that allows posts to be viewed or edited.\n \"\"\"\n queryset = Post.objects.all().order_by('-published_date')\n serializer_class = PostSerializer\n # permission_classes = [permissions.IsAuthenticated] \n http_method_names = ['get','post','put','patch','delete']\n\nclass CategoryViewSet(viewsets.ModelViewSet):\n\tqueryset = Category.objects.all()\n\tserializer_class = CategorySerializer\n\thttp_method_names = ['get','post','put','patch','delete']\n\nclass TagViewSet(viewsets.ModelViewSet):\n\tqueryset = Tag.objects.all()\n\tserializer_class = TagSerializer\n\thttp_method_names = ['get','post','put','patch','delete']\n\nclass CommentViewSet(viewsets.ModelViewSet):\n\tqueryset = Comment.objects.all()\n\tserializer_class = CommentSerializer\n\thttp_method_names = ['get','post','put','patch','delete']" }, { "alpha_fraction": 0.72575443983078, "alphanum_fraction": 0.733665406703949, "avg_line_length": 32.70296859741211, "blob_id": "ba937eae245294fdf0dc096169fd4a93cffd613c", "content_id": "d407e9795e91a19fc6ad5a5e6eeb6cd82a16b93e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3413, "license_type": "no_license", "max_line_length": 118, "num_lines": 101, "path": "/blog/models.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django_extensions.db.fields import AutoSlugField\nfrom autoslug import AutoSlugField\n\nclass Category(models.Model):\n\ttitle= 
models.CharField(max_length=200)\n\ttext=models.TextField()\n\tslug = AutoSlugField(populate_from='title', max_length=160, editable=True)\n\tparent=models.ForeignKey('self', blank=True, null=True, related_name='children', on_delete=models.CASCADE)\n\tcreated_date = models.DateTimeField(default = timezone.now)\n\n\tclass Meta:\n\t\tverbose_name_plural = \"categories\" \n\n\tdef __str__(self):\n\t\treturn self.title\n\n\tdef slugify_function(self, content):\n\t\treturn content.replace('_', '-').lower()\n\nclass Tag(models.Model):\n\ttitle = models.CharField(blank=True, max_length=200)\n\ttext=models.TextField(default=True)\n\tslug = AutoSlugField(populate_from='title', max_length=160, editable=True)\n\tcreated_date = models.DateTimeField(default = timezone.now)\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\tdef slugify_function(self, content):\n\t\treturn content.replace('_', '-').lower()\n\nclass Post(models.Model):\n\ttag = models.ManyToManyField('Tag')\n\tcmnt=models.ForeignKey('Comment', null=True, blank=True,on_delete=models.CASCADE, related_query_name=\"posts\")\n\tcategory = models.ForeignKey('Category', null=True, blank=True, on_delete=models.CASCADE, related_query_name=\"posts\")\n\tauthor = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n\ttitle = models.CharField(max_length=200)\n\ttext = models.TextField()\n\tcreated_date = models.DateTimeField(default = timezone.now)\n\tpublished_date = models.DateTimeField(blank = True, null = True)\n\tslug = AutoSlugField(populate_from='title', max_length=160, editable=True)\n\t\n\n\tdef publish(self):\n\t\tself.published_date = timezone.now()\n\t\tself.save()\n\n\tdef __str__(self):\n\t\treturn self.title \n\n\tdef slugify_function(self, content):\n\t\treturn content.replace('_', '-').lower()\n\n# class Post(models.Model):\n# \tauthor = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n# \ttitle = models.CharField(max_length=200)\n# \ttext = models.TextField()\n# \tcreated_date = models.DateTimeField(default = timezone.now)\n# \tpublished_date = models.DateTimeField(blank = True, null = True)\n\t\n# \tdef get_absolute_url(self):\n# \t\treturn \"/post/%i\" % self.id\n\n# \tdef publish(self):\n# \t\tself.published_date = timezone.now()\n# \t\tself.save()\n\n# \tdef __str__(self):\n# \t\treturn self.title \n\nclass Userprofile(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE)\n\tuser_image=models.ImageField(upload_to='media/',blank=True)\n\n\tdef __str__(self):\n\t\treturn self.user.username\n\nclass Comment(models.Model):\n\tpost = models.ForeignKey('Post', on_delete=models.CASCADE, related_name='comments')\n\tname = models.CharField(max_length=200)\n\ttext = models.TextField()\n\tcreated = models.DateTimeField(default=timezone.now)\n\tupdated = models.DateTimeField(auto_now=True)\n\tactive = models.BooleanField(default=True)\n\tparent = models.ForeignKey('self', null=True, blank=True,on_delete=models.CASCADE, related_name='replies')\n\tslug = AutoSlugField(populate_from='name', max_length=160, editable=True)\n\n\tclass Meta:\n\t\t# sort comments in chronological order by default\n\t\tordering = ('created',)\n\n\t\n\tdef __str__(self):\n\t\treturn self.name\n\n\tdef slugify_function(self, content):\n\t\treturn content.replace('_', '-').lower()\n\n\t\n\t\t\n\t\n\n" }, { "alpha_fraction": 0.7068063020706177, "alphanum_fraction": 0.7068063020706177, "avg_line_length": 22.90625, "blob_id": "8d1cc4774679c1eb3417a74cc695fe85cd715fe7", "content_id": 
"8102da68c232596aabce2b5c6bf9f0febab0b72f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 764, "license_type": "no_license", "max_line_length": 70, "num_lines": 32, "path": "/blog/forms.py", "repo_name": "vinaybana/djangoapp", "src_encoding": "UTF-8", "text": "from django import forms\nfrom .models import Post, Userprofile, Category, Comment\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.forms import ModelForm\n\nclass PostForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Post\n\t\tfields = ('title', 'text',)\n\nclass ProfileForm(forms.ModelForm):\n\tclass Meta:\n\t\tmodel = Userprofile\n\t\tfields = ('user_image',)\n\n# class UserForm(forms.ModelForm):\n# \tclass Meta:\n# \t\tmodel = User\n# \t\tfields = ('username', 'first_name', 'last_name', 'email')\n\nclass categoryForm(forms.ModelForm):\n\n\tclass Meta:\n\t\tmodel = Category\n\t\tfields=('title' , 'text',)\n\nclass CommentForm(forms.ModelForm):\n\n class Meta:\n model = Comment\n fields = ('name', 'text')" } ]
11
syedadzha/Multithreaded-socket-server-with-client
https://github.com/syedadzha/Multithreaded-socket-server-with-client
7315c06b255d3034869b4a8fe4668ebe038dc1bd
bd7af7c964b7c843731daf095d0c993148a37daa
df463ff6dc80716e683e2706c5dbea6e66ee8127
refs/heads/master
2022-11-07T10:18:52.884868
2020-06-23T06:23:03
2020-06-23T06:23:03
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.504011332988739, "alphanum_fraction": 0.5143935680389404, "avg_line_length": 29.27142906188965, "blob_id": "5b234e3289ef7c960fddeb7d8ccbc3aad8939abf", "content_id": "8bf2522b302a14b1182cba87f3e8518b0bf145cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2119, "license_type": "no_license", "max_line_length": 76, "num_lines": 70, "path": "/server.py", "repo_name": "syedadzha/Multithreaded-socket-server-with-client", "src_encoding": "UTF-8", "text": "from socket import *\nimport threading\nimport datetime\n\nclass ClientThread(threading.Thread):\n def __init__(self, connect, address):\n threading.Thread.__init__(self)\n self.connectionSocket = connect\n self.addr = address\n\n def run(self):\n while True:\n try:\n message = connectionSocket.recv(2048)\n\n if not message:\n break\n print(\"message: \")\n print(message)\n filename = message.split()[1]\n f = open(filename[1:])\n outputdata = f.read()\n \n now = datetime.datetime.now()\n \n first_header = \"HTTP/1.1 200 OK\"\n\n header_info = {\n \"Content-Length\": len(outputdata),\n \"Keep-Alive\": \"timeout=%d,max=%d\" % (10, 100),\n \"Connection\": \"Keep-Alive\",\n \"Content-Type\": \"text/html\"\n }\n\n following_header = \"\\r\\n\".join(\"%s: %s\" % (\n item, header_info[item]) for item in header_info)\n print \"%s\\r\\n%s\\r\\n\\r\\n\" % (first_header, following_header)\n self.connectionSocket.send(\n \"%s\\r\\n%s\\r\\n\\r\\n\" % (first_header, following_header))\n self.connectionSocket.send(\"%s\\r\\n\" % (outputdata))\n self.connectionSocket.close()\n print \"Data sent, socket closed\"\n\n except IOError:\n \n self.connectionSocket.send(\"HTTP/1.1 404 Not Found\\r\\n\\r\\n\")\n self.connectionSocket.close()\n\n break\n \n\n\nserverSocket = socket(AF_INET, SOCK_STREAM)\n\nserverPort = int(raw_input(\"Enter Port Number : \"))\nserverSocket.bind(('', serverPort))\nserverSocket.listen(5)\nthreads = []\n \nwhile True:\n \n print('Ready to serve...')\n connectionSocket, addr = serverSocket.accept()\n print(\"addr:\\n\", addr)\n \n client_thread = ClientThread(connectionSocket, addr)\n client_thread.start()\n threads.append(client_thread)\n\t\nserverSocket.close()\n" }, { "alpha_fraction": 0.5708154439926147, "alphanum_fraction": 0.5965664982795715, "avg_line_length": 27.414634704589844, "blob_id": "c25de9e097da03ac58577fae33ff433422e223d1", "content_id": "4f863b9edf5d632cb29caeb78fcdb9e71325d83c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1165, "license_type": "no_license", "max_line_length": 84, "num_lines": 41, "path": "/client.py", "repo_name": "syedadzha/Multithreaded-socket-server-with-client", "src_encoding": "UTF-8", "text": "from socket import *\nimport sys\n\n\nserver_port = int(raw_input(\"Port Number:\"))\nfilename = raw_input(\"Search here:\")\nserver_host = \"127.0.0.1\"\n# server_port = 8080\nhost_port = \"%s:%s\" % (server_host, server_port)\nprint '\\n\\n'\ntry:\n client_socket = socket(AF_INET, SOCK_STREAM)\n client_socket.connect((server_host, int(server_port)))\n header = {\n \"first_header\": \"GET /%s HTTP/1.1\" % (filename),\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Language\": \"en-us\",\n \"Host\": host_port,\n }\n http_header = \"\\r\\n\".join(\"%s:%s\" % (\n item, header[item]) for item in header)\n print http_header\n client_socket.send(\"%s\\r\\n\\r\\n\" % (http_header))\n\n final = \"\"\n response_message = 
client_socket.recv(2048)\n print response_message\n while response_message:\n if \"HTTP/1.1 404 Not Found\\r\\n\\r\\n\" in response_message:\n final = \"File Not Found !!!\"\n break\n \n response_message = client_socket.recv(1024)\n final += response_message\n break\n\n client_socket.close()\n print \"final:\\n\", final\n\nexcept IOError:\n sys.exit(1)\n" }, { "alpha_fraction": 0.7067307829856873, "alphanum_fraction": 0.7323718070983887, "avg_line_length": 19.064516067504883, "blob_id": "d585578cc95b96c6fb883677298656a6aed10634", "content_id": "8363688e03d1008c3169891c791b07ae1dc1fbfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 624, "license_type": "no_license", "max_line_length": 89, "num_lines": 31, "path": "/README.md", "repo_name": "syedadzha/Multithreaded-socket-server-with-client", "src_encoding": "UTF-8", "text": "# Multithreaded-socket-server-with-client\nMultithreaded web server that is capable of serving multiple HTTP requests simultaneously\n\n### Requirement \nPython 2.7<br>\n\n### Run Server\nOpen commmand prompt/terminal and run:\n\n```\nE:\\Multithread http request>python2 server.py\nEnter Port Number : 8080\nReady to serve...\n```\nUser need to input port number for server to serve\n\n\n### Run Client\n\n```\nE:\\Multithread http request>python2 client.py\nPort Number:8080\nSearch here:hello.html\n```\n\n### Test server with brower\n\n```\nhttp://localhost:8080/hello.html\n```\n<div style=\"text-align:center\"><img src=\"/images/hello.JPG\" /></div>\n\n\n" } ]
3
honoriovega/cst205-proj2
https://github.com/honoriovega/cst205-proj2
7fc0fd592c9e58e90dba599dcc5f2fc67476fe90
9637d08f26e49b42c2c2154e38413284d0a02fd0
5f226356fa574abb61173bbe88bbadd3e2a51a78
refs/heads/master
2021-01-14T08:51:23.080907
2017-03-16T15:44:00
2017-03-16T15:44:00
81,980,613
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6807254552841187, "alphanum_fraction": 0.689723014831543, "avg_line_length": 27.110671997070312, "blob_id": "1ddbe6007cd462587575049f2b793be97ce7a9e0", "content_id": "5635c912099d241c7a05c9ec2962fd8ca7f3b999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7113, "license_type": "no_license", "max_line_length": 135, "num_lines": 253, "path": "/app.py", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "\"\"\"\n Course : CST205\n Title : app.py\n Authors: Javar Alexander, Honorio Vega, Antonio Villagomez\n Abstract : This program is the driver of the program. It sets up\n the server. It also broadcasts and sends new messages.\n\t\t\t aswell as saving the messages to a database. It pulls\n\t\t\t pictures from Getty and Giphy API's and sends them to\n\t\t\t the users\n Date : 03/15/2017\n Who worked on what: Javar and Honorio worked on this file. Javar\n\t\t\t\t\t wrote the spotify feature. Honorio worked on the\n\t\t\t\t\t database and received and sending images. All other\n\t\t\t\t feautures in this file were a combination of \n\t\t\t\t work from Javar and Honorio. For example, Javar\n\t\t\t\t worked on parts of the BOT and Honorio worked\n\t\t\t\t on it also\n\nGITHUB LINK : https://github.com/honoriovega/cst205-proj2\n\n\"\"\"\n\n\n\nimport random, os, flask, flask_socketio, flask_sqlalchemy,requests, time\nfrom random import randint, choice\nfrom flask_socketio import send\nimport gettyApi\nimport botcommands\n\nimport urlparse\nimport json\n\napp = flask.Flask(__name__)\n\n#app.config[ 'SQLALCHEMY_DATABASE_URI' ] = 'postgresql://potato:potatosareawesome@localhost/postgres'\napp.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = flask_sqlalchemy.SQLAlchemy(app)\n\nclass Message(db.Model):\n\tid = db.Column(db.Integer, primary_key=True)\n\ttext = db.Column(db.String(300))\n\tpicture = db.Column(db.String(200))\n\tname = db.Column(db.String(100))\n\tapiLink = db.Column(db.String(500))\n\n\tdef __init__(self, p,n,t,al=''):\n\t\tself.text = t\n\t\tself.picture = p\n\t\tself.name = n\n\t\tself.apiLink = al\n\n\tdef __repr__(self):\n\t\treturn '<%s %s: %s>' % (self.picture, self.name, self.text)\n\nsocketio = flask_socketio.SocketIO(app)\n\nall_messages = []\nall_connected_users = { };\nall_numbers = []\n\n# fetch all message from database and store them in dictionary \n# and append to a list\ndef fetchAllMessages():\n\tmessages = Message.query.all()\n\ttemp = []\n\n\tfor message in messages:\n\t\ttemp.append({\n\t\t'name': message.name,\n\t\t'picture': message.picture,\n\t\t'msg': message.text,\n\t\t'link' : message.apiLink\n\t\t})\n\n\treturn temp\n\n# broadcasst the messages\ndef fetchAndEmit():\n\tall_messages[:] = fetchAllMessages()\n\n\tsocketio.emit('all messages', {\n\t'messages': all_messages\n\t})\n\n# add message to our database\ndef addMessage(userPicture, name, msg):\n\tmessage = Message(userPicture,name, msg)\n\tdb.session.add(message)\n\tdb.session.commit()\n\n# add message to our database\ndef addBotMessage(msg):\n\tBOT_PICTURE = '/static/bot.jpg'\n\tBOT_NAME = 'Bender_from_futurama'\n\taddMessage(BOT_PICTURE,BOT_NAME,msg)\n\n# add message to our database\ndef addBotMessageAPI(link):\n\tBOT_PICTURE = '/static/bot.jpg'\n\tBOT_NAME = 'Bender_from_futurama'\n\taddPictureMessage(BOT_PICTURE,BOT_NAME,link)\n\n# add message to our database\ndef addPictureMessage(userPicture, name, apiLink):\n\tmessage = 
Message(userPicture,name, '', apiLink)\n\tdb.session.add(message)\n\tdb.session.commit()\n\n# this is where the app starts\[email protected]('/')\ndef hello():\n\tkeywords = ['technology','forest','background']\n\ta = gettyApi.initBackground(choice(keywords))\n\n\treturn flask.render_template('index.html',back=a)\n\n# When the user conencts call the fetchAndEmit command\n# which pulls the messages from the database and broadcasts them\[email protected]('connect')\ndef on_connect():\n\tfetchAndEmit()\n\n# this function was used for testing purposes\[email protected]('new number')\ndef on_new_number(data):\n\tall_numbers.append(100)\n\tsocketio.emit('all numbers', {\n\t\t\t'numbers' : all_numbers\n\t})\n\n\n# Function that Javar wrote. Fetches data from the Spotify API and display it\[email protected]('Spotify')\ndef spotify(data):\n\ttracks =[]\n\tsearchType = data['searchType']\n\tsearchQuery = data['searchQuery']\n\tsearchQuery1 = searchQuery.replace(\"+\", \"%20\")\n\tresponse = requests.get(\"https://api.spotify.com/v1/search?q=\"+searchQuery1+\"&type=\"+searchType)\n\tjson = response.json()\n\tif 'tracks' in json and 'items' in json['tracks']:\n\t\tfor item in json['tracks']['items']:\n\t\t\tprint item['uri']\n \t\ttracks.append(item['uri'])\n\n\tmy_headers = {\"Accept\" : \"application/json\", \"Authorization\" : \"Bearer BQCm9bzjiDxNb9FurI8AWVgraOhvdZyzpBBNq753DwEXocrLa8kyPNOalfXuevtiZ10Kt8FIuvM1RMnv6mWiVsz9bXU8VQzEv3xdHAE5Qs4-eFI4dh3spBArHnzQLl6gGqvddte-H7JZQVzJEsxobx1TSStfVqonFzxWdH418b5RtzgZMHFgnKtV-6qW9g_axQ1bKwQ4Fm8e1NI\"}\n\turl = \"https://api.spotify.com/v1/tracks/1zHlj4dQ8ZAtrayhuDDmkY\"\n\ttrack_response = requests.get(url, headers= my_headers)\n\tspotify_links = track_response.json()\n\trandom_track = random.choice(tracks)\n\n\trandom_track_link = \"https://embed.spotify.com/?uri=\"+random_track\n\tsocketio.emit('fromSpotify', random_track_link)\n\n\n# this function was ment as featuer to greet the user on log in\n# the feature was not implemented as their was issue. We didn't\n# want to remove it beacause it might break our code. For now it is just here\[email protected]('greet user')\ndef greet_user(data):\n\tpicture = ''\n\tUSERNAME = ''\n\tgreet = ''\n\n\tif(data['google_user_token'] == '' and data['facebook_user_token'] == ''):\n\t\tsend('greeting user')\n\n\telse:\n\n\t\tif(data['google_user_token'] == ''):\n\t\t\tresponse = requests.get('https://graph.facebook.com/v2.8/me?fields=id%2Cname%2Cpicture&access_token=' + data['facebook_user_token'])\n\t\t\tjson = response.json()\n\t\t\tUSERNAME = json['name']\n\t\t\tpicture = json['picture']['data']['url']\n\t\t\tgreet = 'Hello ' + USERNAME + ' logged in from Facebook'\n\n\t\telse:\n\t\t\tresponse = requests.get('https://www.googleapis.com/oauth2/v3/tokeninfo?id_token=' + data['google_user_token'])\n\t\t\tjson = response.json()\n\t\t\tpicture = json['picture']\n\t\t\tUSERNAME = json['name']\n\t\t\tgreet = 'Hello ' + USERNAME + ' logged in from Google'\n\n\t\taddBotMessage(greet)\n\n\t\tfetchAndEmit()\n\n# When a new message is received this function\n# stores it in the database, checks to see if it a bot command\n# or a link. 
\[email protected]('new msg')\ndef on_new_msg(data):\n\tfacebookAPI = 'https://graph.facebook.com/v2.8/me?fields=id%2Cname%2Cpicture&access_token='\n\tgoogleAPI = 'https://www.googleapis.com/oauth2/v3/tokeninfo?id_token='\n\n\tmsg = data['msg']\n\tUSERNAME = ''\n\tpicture = ''\n\tmsg = msg.strip()\n\tif(data['google_user_token'] == '' and data['facebook_user_token'] == '' ):\n\t\tsend('received message')\n\n\telse:\n\t\tif(data['google_user_token'] == ''):\n\t\t\tresponse = requests.get( facebookAPI + data['facebook_user_token'])\n\t\t\tjson = response.json()\n\t\t\tUSERNAME = json['name']\n\t\t\tpicture = json['picture']['data']['url']\n\n\t\telse:\n\t\t\tresponse = requests.get(googleAPI + data['google_user_token'])\n\t\t\tjson = response.json()\n\t\t\tpicture = json['picture']\n\t\t\tUSERNAME = json['name']\n\n\turl = msg\n\tparts = urlparse.urlsplit(url)\n\n\t# it is not a url so add it and emit\n\tif not parts.scheme or not parts.netloc:\n\n\t\tif('!! say' in msg):\n\t\t\tx = 10\n\t\telse:\n\t\t\taddMessage(picture,USERNAME, msg)\n\n\telse:\n\t\tprint \"yes an url\"\n\t\taddPictureMessage(picture,USERNAME,url)\n\t#fetchAndEmit()\n\t# handle bot command\n\tif(msg[:2] == '!!'):\n\t\tresponse = botcommands.processBotCommand(msg)\n\t\tif(len(response) > 4):\n\t\t\tif(response[:4] == 'http'):\n\t\t\t\taddBotMessageAPI( response )\n\t\t\telse:\n\t\t\t\taddBotMessage(response)\n\t\telse:\n\t\t\taddBotMessage(response)\n\n\tfetchAndEmit()\n\n# this gets the server up an running\nif __name__ == '__main__':\n\tsocketio.run(\n\t\tapp,\n\t\thost=os.getenv('IP', '0.0.0.0'),\n\t\tport=int(os.getenv('PORT', 8080)),\n\t\tdebug=True\n\t)\n\n" }, { "alpha_fraction": 0.5984252095222473, "alphanum_fraction": 0.748031497001648, "avg_line_length": 17.285715103149414, "blob_id": "b755358e0618a3616f0901cac6ff1d02a27fda94", "content_id": "59768749b501f2ca2a4666eb57a1d96011adb03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 127, "license_type": "no_license", "max_line_length": 24, "num_lines": 7, "path": "/requirements.txt", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "Flask==0.12\nFlask-SocketIO==2.8.5\nFlask-SQLAlchemy==2.2\npsycopg2==2.6.2\nrequests==2.13.0\nrequests-oauthlib==0.7.0\nFlask-Testing" }, { "alpha_fraction": 0.5407545566558838, "alphanum_fraction": 0.5542617440223694, "avg_line_length": 22.086021423339844, "blob_id": "62d0714e8cc9692a57654db229b0271e16c13f78", "content_id": "75d3abb4913fe7390c7952d61c6a23c6341b33a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2147, "license_type": "no_license", "max_line_length": 156, "num_lines": 93, "path": "/scripts/Chatroom.js", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "/*\n Course : CST205\n Title : index.html\n Authors: Honorio Vega, Javar Alexander\n Abstract : Client side code to display the messages. \n Date : 03/15/2017\n Who worked on what: Honorio wrote the handleLink function.\n\n*/\n\n\nimport * as React from 'react';\n\nimport { Button } from './Button';\nimport { Socket } from './Socket';\n//authors Honorio V.\n\n//this class checks links to see if their urls. 
If they are then we break them up by general urls, image urls, and youtube urls and display them approaitely\nexport class Chatroom extends React.Component {\n\n\t\t handleLink(link)\t\t\n\t\t{ \n\t\t\tconsole.log(\"checcking if url\");\n\t\t\t\n\t\t\tvar c = link.replace(/\\s/g,'');\n\t\t\tlink = c;\n\t\t\t// empty string do nothing\n\t\t if(link === '')\n\t\t \treturn;\n\t\t \t\n \tvar len = link.length;\n \tvar res = link.slice(len - 3, len);\n \t\n \tif(res === 'jpg' || res === 'png' || res === 'gif')\n\t\t\treturn <img src={link} />;\n\t\telse if(link.slice(len - 4, len) == 'jpeg')\n\t\t\treturn <img src={link} />;\n\t\t\n\t\telse if( link.slice(0, 12) === 'http://cache')\n\t\t\treturn <img src={link} />;\n\t\t\n\t\telse if(link.includes('getty')) {\n\t\t\t\n\t\t\t return <img src={link} />;\n\t\t}\n\t\t\n\t\telse if( link.includes('youtube.com')) {\n\t\t\t\n\t\t\t\n \t\tvar res = link.split(\"=\");\n\t\t\t\n\t\t\tvar ytlink = \"https://www.youtube.com/embed/\" + res[1];\n\t\t\treturn <iframe width=\"560\" height=\"315\" src={ytlink } ></iframe>;\n\t\t}\n\t\telse\n\t\t\treturn <a href={link} target=\"_blank\"> {link} </a>;\n\t\t}\n\t\t\n\t\thandleName(name)\n\t\t{\n\t\t\n\t\t\tif(name == 'Bender_from_futurama')\n\t\t\t\treturn <b>{name}</b>;\n\t\t\telse\n\t\t\t\treturn name;\n\t\t}\n\t\n\t\trender() {\n\t\t\n\t\t\n\t\t\n\t\t\t\tvar x = \"/static/BOT.jpg\";\t\n let allMessages = this.props.messages.map( (msg) =>\n <p id=\"msgtext\">\n <img id=\"photo\" style={{width : 100, height: 100}}src={msg.picture} /> {this.handleName(msg.name)}: &nbsp;\n {msg.msg}\n {this.handleLink(msg.link)}\n \t</p>\t);\n \t\n\t\t\t\n return (\n\t\t\t<div className='chatroom' >\n\t\t\t\t<div id='messageArea' className='msgArea'>\n\t\t\t\t{allMessages}\n\t\t\t\t</div>\n\t\t\t\t<br />\n\t\t\t\t<div className='sendMessageArea'>\n\t\t\t\t<Button name='Send Message'/>\n\t\t\t\t</div>\n\t\t\t</div>\n );\n }\n}\n" }, { "alpha_fraction": 0.6319862604141235, "alphanum_fraction": 0.6552020907402039, "avg_line_length": 28.820512771606445, "blob_id": "405e74ff269db5a8b550befc45c071cb279f81d8", "content_id": "556585c14db73783b0743d5019bc36f155583f00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 100, "num_lines": 39, "path": "/models.py", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "\"\"\"\n Course : CST205\n Title : models.py\n Author: Honorio Vega\n Abstract : This file is our database model. It defines our fields and\n\t\t\ttable. This file will be used to create the database\n\t\t\tin postgresql. \n Date : 03/15/2017\n Who worked on what: Honorio wrote this file. 
Consulted with Antonio\n\t\t\t\t\t and Javar for their input on what fields should\n\t\t\t\t\t be included\n\nGITHUB LINK : https://github.com/honoriovega/cst205-proj2\n\"\"\"\n\nfrom app import *\napp.config[ 'SQLALCHEMY_DATABASE_URI' ] = 'postgresql://potato:potatosareawesome@localhost/postgres'\n#app.app.config['SQLALCHEMY_DATABASE_URI'] = os.getenv('DATABASE_URL')\ndb = flask_sqlalchemy.SQLAlchemy(app)\n\n\nclass Message(db.Model):\n \n # The structure of our database\n id = db.Column(db.Integer, primary_key=True)\n text = db.Column(db.String(300))\n picture = db.Column(db.String(200))\n name = db.Column(db.String(100))\n apiLink = db.Column(db.String(500))\n\n \n def __init__(self, p,n,t,al=''):\n self.text = t\n self.picture = p\n self.name = n\n self.apiLink = al\n \n def __repr__(self):\n return '<%s %s: %s>' % (self.picture, self.name, self.text)\n" }, { "alpha_fraction": 0.7289473414421082, "alphanum_fraction": 0.7289473414421082, "avg_line_length": 41.11111068725586, "blob_id": "b7040af6b6c763578abe4e65d43eeed602817371", "content_id": "9e3fb9f939b85192cf49c7f7e6eaef3f9ad3cf2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 380, "license_type": "no_license", "max_line_length": 148, "num_lines": 9, "path": "/scripts/Main.js", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "\n//Javar & Honorio Worked on this\n\n//This file here is the js file included in our index. were telling it here to render all the stuff from content, and put it on our index html page \n//in the content div\n\nimport * as React from 'react';\nimport * as ReactDOM from 'react-dom';\nimport { Content } from './Content';\nReactDOM.render(<Content />, document.getElementById('content'));\n" }, { "alpha_fraction": 0.6819115877151489, "alphanum_fraction": 0.6952723264694214, "avg_line_length": 35.73584747314453, "blob_id": "cc15ac4618a5c6d4e1435a60b07466bae57a14d5", "content_id": "8624b76035a550c2af8f73b92ce341ad37ff47c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1946, "license_type": "no_license", "max_line_length": 99, "num_lines": 53, "path": "/gettyApi.py", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "\"\"\"\n Course : CST205\n Title : botcommands.py\n Authors: Javar Alexander, Honorio Vega\n Abstract : This file contains the commands for the getty api.\n\t\t\tIt contains two functions. One to fetch the background for\n\t\t\tthe chat app and the other to retrieve images based on a users\n\t\t\trequest. For example if a user types \"!! getty dogs\" the\n\t\t\tthe getImages command will take that string and parse it\n\t\t\tIt will extract the search term and make an api call to \n\t\t\tGetty. It will response a result and pick a random image\n\t\t\tand return it. \n Date : 03/15/2017\n Who worked on what: Honorio generated the API keys and wrote the\n\t\t\t\t\t getImages function. 
Javar wrote the initbackground\n\t\t\t\t\t function\n\nGITHUB LINK : https://github.com/honoriovega/cst205-proj2\n\"\"\"\n\n\nimport requests\nfrom random import randint,choice\n\ndef getImages(search_term):\n # fiels=detail_set\n url = \"https://api.gettyimages.com/v3/search/images?fields=detail_set&sort_order=best&phrase=\" \\\n + search_term + \"&page_size=100\"\n\n my_headers = { \"Api-Key\" : 'qwj5pp6xrv4td7djmab3jeec' }\n response = requests.get(url, headers = my_headers)\n json_body = response.json()\n \n length = len(json_body['images']) \n return json_body['images'][randint(0,length - 1)]['display_sizes'][0]['uri']\n\ndef initBackground(search_term):\n url = \"https://api.gettyimages.com/v3/search/images?fields=detail_set&sort_order=best&phrase=\" \\\n + search_term + \"&page_size=100\"\n\n my_headers = { \"Api-Key\" : 'qwj5pp6xrv4td7djmab3jeec' }\n response = requests.get(url, headers = my_headers)\n json_body = response.json()\n\n\t\t\t\t # holds ids asociated with the images using list comprehension\n idHolder = [ photoID['id'] for photoID in json_body['images'] ]\n \n #setting the randomly selected images id to a variable\n getty_images_id = choice(idHolder) \n \n #appending the variable to the HQ link to get images\n getty_image_source = \"http://media.gettyimages.com/photos/-id\" + getty_images_id \n return getty_image_source" }, { "alpha_fraction": 0.5584479570388794, "alphanum_fraction": 0.5589390993118286, "avg_line_length": 34.73684310913086, "blob_id": "5a76ef78fc6a31aa049930b47e0d0215258387c1", "content_id": "3286218f3ee22f058178ec2797d7265966636c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2036, "license_type": "no_license", "max_line_length": 164, "num_lines": 57, "path": "/scripts/Sound.js", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "//Written by Javar A. code sets up our view for spotify, and allows it to talk to our backend sockets. \n\n\nimport * as React from 'react';\nimport { Socket } from './Socket';\n\nexport class Sound extends React.Component {\n constructor(props) {\n super(props);\n this.state = {\n track : \" \"\n };\n }\n //here were checking to see if someone sent spotify data and if so, let's capture that data\n componentDidMount() {\n Socket.on('fromSpotify' ,(data) =>{\n this.setState({\n 'track' : data \n });\n //let's make the spotify widget visible \n document.getElementById(\"Spotifyframe\").style.visibility =\"visible\";\n });\n }\n //this is what handles our spotify search button queries. Once the user hits submit that data is sent through a socket to the server and processed server side. \n handleSubmitMusic(event) {\n event.preventDefault();\n var searchType = document.getElementById('SearchFor').value;\n var searchQuery = document.getElementById('searchQuery').value;\n Socket.emit('Spotify' , {\n 'searchType' : searchType, \n 'searchQuery' : searchQuery,\n });\n }\n //here is where we piece everything together and render the spotify button on the screen, search box and button. 
\n render() {\n return (\n <div> \n <div>\n <br />\n <br />\n <iframe id =\"Spotifyframe\" src={this.state.track} frameborder=\"0\" allowtransparency=\"true\"></iframe>\n </div>\n <div className = \"spotifyinput\">\n <form onSubmit={this.handleSubmitMusic}>\n <select id = \"SearchFor\">\n <option value=\"Track\" >Track</option>\n </select>\n <input type = \"text\" id = \"searchQuery\" name=\"searchQuery\"/>\n <input type=\"submit\" id = \"submit\" value = \"search song!\" />\n </form>\n </div>\n <div>\n </div>\n </div>\n );\n }\n}" }, { "alpha_fraction": 0.5729023218154907, "alphanum_fraction": 0.5839064717292786, "avg_line_length": 28.98969078063965, "blob_id": "54dab71dab9563bed6081ed39eb323cfddb7600c", "content_id": "0179b8ee72f1812855fadd401f330d9ffe6a7729", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2908, "license_type": "no_license", "max_line_length": 104, "num_lines": 97, "path": "/botcommands.py", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "\"\"\"\n Course : CST205\n Title : botcommands.py\n Authors: Javar Alexander, Honorio Vega\n Abstract : This contains the possible commands that the bot can do.\n\t\t It can be made to repeat what a user said. It can also be\n\t\t made to fetch pictures and gif's from Getty and Giffy\n\t\t respectively. \n Date : 03/15/2017\n Who worked on what: Honorio worked on parsing and processing the text.\n\t\t\t\t Javar wrote on the functions called that called\n\t\t\t\t external API's\n\nGITHUB LINK : https://github.com/honoriovega/cst205-proj2\n\n\"\"\"\n\nfrom random import randint,choice\nimport gettyApi\nimport json\nimport urllib\ndef processBotCommand(userSubmitted):\n\n recognizedCommands = ['say','about','help','backwards','doMath','getty','giffy']\n\n if('!! about' in userSubmitted):\n\n msg = 'website created by Honorio Vega, Javar Alexander, Antonio Villagomez'\n return msg\n\n elif('!! say' in userSubmitted):\n msg = userSubmitted.split('!! say')[1]\n return msg.strip()\n\n elif('!! backwards' in userSubmitted):\n msg = userSubmitted.split('!! backwards')[1]\n backwards = \"\".join(list(reversed(msg)))\n return backwards.strip()\n\n elif('!! doMath' in userSubmitted):\n a = randint(1,100)\n b = randint(1,100)\n currentTime = \"%d + %d = %d\" % (a,b,a+b)\n\n messagesend = str(currentTime)\n return messagesend.strip()\n\n elif('!! help' in userSubmitted):\n msg = 'I recognize these commands: ' + \", \".join(recognizedCommands)\n return msg\n\n elif('!! getty' in userSubmitted):\n\n searchTerm = userSubmitted.split('!! getty')[1]\n img = gettyApi.getImages(searchTerm)\n return img\n \n elif('!! giffy' in userSubmitted):\n searchTerm = userSubmitted.split('!! 
giffy')[1]\n query = searchTerm.replace(' ','+')\n \n link = \"http://api.giphy.com/v1/gifs/search?q=\" + query + \"&api_key=dc6zaTOxFJmzC&limit=5\"\n \n data = json.loads(urllib.urlopen(link).read())\n \n # no results :-*(\n if(len(data['data']) == 0 ):\n return \"Sorry I didn't find any gif's with that search term\"\n \n else:\n \n apilink = data['data'][randint(0,len(data['data']) -1)]['images']['downsized_medium']['url']\n return apilink\n \n\n\n else:\n msg = 'command not recognized'\n return msg\n\ndef sayBye(name):\n\trandomPhrases = [\"Leave and don't come back \", \"Get out of here \", \"Good ridance \",\n\t\t\t\t\t \"Be gone \"]\n\t\n\treturn choice(randomPhrases) + name\n\ndef greetNewUser(name):\n\tphrases = ['Wassup, ', 'YO ', \n\t \"How's it going \", \"Hey there \", \"Howdy \"]\n\treturn choice(phrases) + name\n\ndef randomPhrase():\n phrases = ['there you happy ? ',\n 'i am at your command for now ...', \n 'computers will take over',\n 'i will be issuing commands to you soon...']\n return choice(phrases)" }, { "alpha_fraction": 0.5963263511657715, "alphanum_fraction": 0.5980350375175476, "avg_line_length": 24.45652198791504, "blob_id": "e0f51c0e4cc3f41459dae447d6997ba54e725ecc", "content_id": "e583c86ff9dfe638b0256bdc37af36f362ba75f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2341, "license_type": "no_license", "max_line_length": 96, "num_lines": 92, "path": "/scripts/Button.js", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "import * as React from 'react';\n\nimport { Socket } from './Socket';\n//Filename: Button.js\n//authors Javar A, Honorio V. Antonio V. We all worked on this together. \n//making buttons here that deal with sending message data from the client to the server. \nexport class Button extends React.Component {\n \n \n \n \n handleSubmit(event) {\n event.preventDefault();\n\n let random = Math.floor(Math.random() * 100);\n console.log('bruh i made a new number : ', random);\n console.log('sahhhh dude : ');\n \n // grabbing the contents of the textbox and storing them\n var referenceToMessage = document.getElementById('msg');\n\t\tvar newMsg = referenceToMessage.value;\n\t\treferenceToMessage.value = \"\";\n\t\n\n//resetting the value of the textbox. 
\n\tdocument.getElementById('msg').value = \" \";\n\t\n//before sending the message, checking to see if the user is authenticated\nFB.getLoginStatus((response) => {\nif (response.status == 'connected') {\n \n \n var header = document.getElementById(\"banner\");\n header.innerHTML = \"\";\n console.log(\"facbook user is logged in\");\n Socket.emit('new msg', {\n 'google_user_token': '',\n 'facebook_user_token':\n response.authResponse.accessToken,\n 'number': random,\n 'msg' : newMsg\n\n });\n \n} else {\n\n\nlet auth = gapi.auth2.getAuthInstance();\nlet user = auth.currentUser.get();\nif (user.isSignedIn()) {\n console.log(\"google logged in\");\n var header = document.getElementById(\"banner\");\n header.innerHTML = \"\";\n\n Socket.emit('new msg', {\n 'google_user_token':\n user.getAuthResponse().id_token,\n 'facebook_user_token': '',\n 'number': random,\n 'msg': newMsg\n });\n \n}\n//letting the user know to sign in when attempting to send a message without being authenticated\nelse {\n var header = document.getElementById(\"banner\");\n header.innerHTML = \"You must be logged in to message!\";\n}\n\n}\n\n});\n\n console.log('Sent up the random number to server!');\n \t\t \n}\n\n\n render() {\n return (\n <div>\n <form onSubmit={this.handleSubmit}>\n <div className =\"enjoy-css\">\n\t\t\t\t <input type = \"text\" id = \"msg\" name=\"lname\"/>\n\t\t\t</div>\n <button>{this.props.name}</button>\n </form>\n </div>\n \n );\n }\n}" }, { "alpha_fraction": 0.48685404658317566, "alphanum_fraction": 0.4895738959312439, "avg_line_length": 23.511110305786133, "blob_id": "0f60421858ae462f97fe086789dcfeaa96742c82", "content_id": "567b80fc304024db5be741bac9baab31f644932a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3309, "license_type": "no_license", "max_line_length": 135, "num_lines": 135, "path": "/scripts/Content.js", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "//Content.JS\n//Authors Honorio V. 
& Javar Alexander - Worked on this together\n//this is the file that takes in user messages from the server \n\nimport * as React from 'react';\nimport { Button } from './Button';\nimport { Socket } from './Socket';\nimport { Chatroom } from './Chatroom';\nimport {Sound} from './Sound';\n\nexport class Content extends React.Component {\n constructor(props) {\n super(props);\n this.state = {\n\t\t\t'messages': [],\n 'numbers': [],\n 'my name': 'I dont have a name yet :(',\n 'all users': []\n };\n }\n \n\n \n\n componentDidMount() {\n\n Socket.on('all numbers', (data) => {\n this.setState({\n 'numbers': data['numbers']\n });\n \n });\n \n \nfunction tryToGreet() {\n FB.getLoginStatus((response) => {\n if (response.status == 'connected') {\n \n \n //here were checking to see if the user is connected via fb or google, if so we send their respective oAuth token to the server\n Socket.emit('greet user', {\n 'google_user_token': '',\n 'facebook_user_token':\n response.authResponse.accessToken\n });\n } else {\n \n \n let auth = gapi.auth2.getAuthInstance();\n let user = auth.currentUser.get();\n if (user.isSignedIn()) {\n \n \n \n Socket.emit('greet user', {\n 'google_user_token':\n user.getAuthResponse().id_token,\n 'facebook_user_token': ''\n });\n }\n \n }\n \n });\n } \n \n //here were grabbing all messages send into the chat room and storing them in messages\n Socket.on('all messages', (data) => {\n this.setState({\n 'messages': data['messages']\n });\n \n \n \tvar objDiv = document.getElementById(\"messageArea\");\n\t\t\t\tobjDiv.scrollTop = objDiv.scrollHeight;\n \n\n })\n \n \n Socket.on('server generated a new name', (data) => {\n console.log('Got a new name from server:', data);\n this.setState({\n 'my name': data['name'],\n })\n });\n //this was susposed to display a whole list of users, but is broken\n Socket.on('list of all users', (data) => {\n console.log('Got a list of all users from the server:', data);\n this.setState({\n 'all users': data['users']\n })\n console.log('New state:', this.state);\n });\n }\n\n render() {\n\n\n \n\n \n let all_users = this.state['all users'].map(\n\t\t\t(user) => <li key={user}>{user}</li>\n\t\t);\n\t//rendering buttons and the spotify container\n\t\t\n return (\n <div>\n <h1>CST 205 - Project 2</h1>\n \n\n <div\nclassName=\"fb-login-button\"\ndata-max-rows=\"1\"\ndata-size=\"medium\"\ndata-show-faces=\"false\"\ndata-auto-logout-link=\"true\">\n</div>\n<div>\n <div onClick={this.tryToGree}\n className=\"g-signin2\"\n data-theme=\"dark\">\n </div>\n <Chatroom messages={this.state.messages}/>\n </div>\n \n <div className = \"spotifyContainer\">\n <Sound/>\n </div>\n </div>\n \n );\n }\n}\n" }, { "alpha_fraction": 0.7323688864707947, "alphanum_fraction": 0.7516576051712036, "avg_line_length": 58.10714340209961, "blob_id": "1c37e6ed7ac365decaf9816fc0e18b50d64c93f2", "content_id": "e54b2f4a00b6f78f7c0bf96cd5400d11ec60ff9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1659, "license_type": "no_license", "max_line_length": 212, "num_lines": 28, "path": "/README.md", "repo_name": "honoriovega/cst205-proj2", "src_encoding": "UTF-8", "text": "# CST-205 - Project 2\n## Authors: Javar Alexander, Honorio Vega, Antonio Villagomez\n## CST 205\n## 03/16/2016\n## to run the program, go here: <a href=\"https://cst205-project2.herokuapp.com/\">https://cst205-project2.herokuapp.com/</a> click sign in,\nyou can authenticate with either facebook or google. 
\n\n## github repo <a href=\"https://github.com/honoriovega/cst205-proj2\">https://github.com/honoriovega/cst205-proj2</a>\n### <b> Note about github </b> We used one cloud 9 workspace. So all the pushes / commits show under Honorio's name since he created the repo. However, we all contributed commited files and pushed them to github.\n\n\n## Project Desciption\n\n\n### For this project, we decided to make a web app where users can login and chat with one another. We added external apis such as\nGetty images for dynamic background images, spotify for our group music player. We also implemented giffy so users can send gifs in chat. \nThink of this as basically the structure for a web based slack clone. There is a bot that the user can interact with. \n\n## Future work\n### We think it'll be cool if we were able to fully implement the youtube serch api with visual results. Also we bounced around the idea\nwith implementing socketio video chat with the python backend and client side in JS.\n\n## Who worked on what? \n### Javar and Honorio worked on server side, setting up the chat functionality and bot commands, ReactJS, and Spotify integration. \n### Antonio focused more on client side views (CSS). \n\n\n#### Note about the file in static/script.js < this file is automatically generated by webpack. it compiles all our js files into one essentially to it can send it to the virtual DOM\n\n\n\n\n" } ]
11
SavatarRUS/My_Python-DZ
https://github.com/SavatarRUS/My_Python-DZ
339ac46535c6257644d07d14f21e745c8493fcb7
53a32866b55d802585a77443338b7240543f24f8
7e09c2e4f3b4aab8be66f6de2abddc1680294135
refs/heads/main
2023-08-07T10:09:05.972031
2021-09-06T16:19:34
2021-09-06T16:19:34
398,002,792
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.71875, "alphanum_fraction": 0.7211538553237915, "avg_line_length": 40.599998474121094, "blob_id": "e2d25d14888025f7d818088203ab2730f9911eb9", "content_id": "d7b6d332207e657102aee3dffcbcce7499c1073c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 102, "num_lines": 10, "path": "/Task_5-5.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# Создать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами.\n# Программа должна подсчитывать сумму чисел в файле и выводить ее на экран.\n\ninput_num = input(\"Введите набор чисел, разделенных пробелами: \")\nnumbers = [int(i) for i in input_num.split()]\n\nwith open(\"my_file_5.txt\", \"w\") as f_obj:\n f_obj.write(input_num)\n\nprint(\"Сумма набора чисел: \" + str(sum(numbers)))\n" }, { "alpha_fraction": 0.6918767690658569, "alphanum_fraction": 0.6918767690658569, "avg_line_length": 28.75, "blob_id": "6c8dcc267988dd1dfa13c69628cdbabf862a27fb", "content_id": "f64699478221fecdc808c38f87da6aecb3e265e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 104, "num_lines": 12, "path": "/Task_5-1.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# Создать программно файл в текстовом формате, записать в него построчно данные, вводимые пользователем.\n# Об окончании ввода данных свидетельствует пустая строка.\n\nmy_file = open('my_file.txt', 'w')\n\nwhile True:\n user_text = input('Введите новую строку: ')\n if user_text == '':\n break\n my_file.writelines(user_text + '\\n')\n\nmy_file.close()\n" }, { "alpha_fraction": 0.6313617825508118, "alphanum_fraction": 0.6561210751533508, "avg_line_length": 37.26315689086914, "blob_id": "df4ddd706fa26ebed09da79367b201c9a04c1588", "content_id": "a9908bd720f2c52fa5e9bff477a958bf25d1dc48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 986, "license_type": "no_license", "max_line_length": 91, "num_lines": 19, "path": "/Task_5-4.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# Создать (не программно) текстовый файл со следующим содержимым:\n# One — 1\n# Two — 2\n# Three — 3\n# Four — 4\n# Необходимо написать программу, открывающую файл на чтение и считывающую построчно данные.\n# При этом английские числительные должны заменяться на русские.\n# Новый блок строк должен записываться в новый текстовый файл.\n\nnew_file_4 = []\neng_to_rus = {'One': 'Один', 'Two': 'Два', 'Three': 'Три', 'Four': 'Четыре'}\n\nwith open('my_file_4.txt', 'r', encoding='utf-8') as my_file_4:\n for line in my_file_4:\n line = line.split(' ', 1)\n new_file_4.append(eng_to_rus[line[0]] + ' ' + line[1])\n\nwith open('my_file_4_new.txt', 'w', encoding='utf-8') as my_file_4_new:\n my_file_4_new.writelines(new_file_4)\n" }, { "alpha_fraction": 0.6967340707778931, "alphanum_fraction": 0.699844479560852, "avg_line_length": 52.58333206176758, "blob_id": "6eee5092faf44e52a7e30574737980e18d02e7ef", "content_id": "d96a385bd3f7876829fcb6637b8f0ef7baa8648e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 955, "license_type": "no_license", "max_line_length": 118, "num_lines": 12, "path": "/Task_5-6.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# 
Необходимо создать (не программно) текстовый файл, где каждая строка описывает учебный предмет и наличие лекционных,\n# практических и лабораторных занятий по этому предмету и их количество.\n# Важно, чтобы для каждого предмета не обязательно были все типы занятий.\n# Сформировать словарь, содержащий название предмета и общее количество занятий по нему. Вывести словарь на экран.\n\np_dict = {}\nwith open(\"my_file_6.txt\", encoding=\"UTF-8\") as f_o:\n for line in f_o:\n name, stats = line.split(\":\")\n n_sum = sum(map(int, \"\".join([i for i in stats if i == \" \" or i.isdigit()]).split()))\n p_dict[name] = n_sum\nprint(p_dict)\n" }, { "alpha_fraction": 0.6629032492637634, "alphanum_fraction": 0.6887096762657166, "avg_line_length": 61, "blob_id": "f7b5fd2873a3beffbdfe9350ca34737086d12974", "content_id": "0bd6e3d06e15e53e8b5a4c29a24936a4b7822243", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 865, "license_type": "no_license", "max_line_length": 115, "num_lines": 10, "path": "/Task_5-3.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# Создать текстовый файл (не программно), построчно записать фамилии сотрудников и величину их окладов. Определить,\n# кто из сотрудников имеет оклад менее 20 тыс., вывести фамилии этих сотрудников.\n# Выполнить подсчет средней величины дохода сотрудников\nwith open(\"my_file_3.txt\", \"r\", encoding=\"utf-8\") as ob:\n salary_dict = {line.split()[0]: float(line.split()[1]) for line in ob}\n # print(salary_dict)\n for e in salary_dict.items():\n if e[1] < 20000:\n print(f\"{e[0]} зарабатывает менее 20к\")\n print(f\"Средняя велечина дохода = {round(sum(salary_dict.values()) / len(salary_dict), 3)}\")\n" }, { "alpha_fraction": 0.6426513195037842, "alphanum_fraction": 0.6570605039596558, "avg_line_length": 37.66666793823242, "blob_id": "b7bd79caee3dc19977df9a0e747e498a31561ffa", "content_id": "49064cee757abc50fa0a74e226444a6673219fcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 487, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/Task_5-2.py", "repo_name": "SavatarRUS/My_Python-DZ", "src_encoding": "UTF-8", "text": "# Создать текстовый файл (не программно), сохранить в нем несколько строк,\n# выполнить подсчет количества строк, количества слов в каждой строке.\n\nwith open('my_file_2.txt', 'r') as my_file_2:\n count = 0\n for line in my_file_2:\n count +=1\n print(f'В строке №{count} {len(line.split())} слов')\n print(f'Всего строк: {count}')" } ]
6
patrickberry1/FinalYearProject
https://github.com/patrickberry1/FinalYearProject
817a0e8eda079e4dbb542a69efeb3900a723a16f
158ff0d631210d10d3ee79f34eb276ecd97f059c
4026f419a62cf41eeff479652225000af305f3d0
refs/heads/master
2021-05-10T22:55:57.275503
2018-05-02T16:21:34
2018-05-02T16:21:34
118,270,915
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6948052048683167, "alphanum_fraction": 0.7532467246055603, "avg_line_length": 21.14285659790039, "blob_id": "fd8a7d444591fcbec71b583646520b3e52b94d58", "content_id": "8dd6cebb9a766ce09f8ea8ad6e1a789109b9a682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 154, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/tests/fuzzycomparisontest.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "from fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\n\nstr1 = \"r2 r2d2 where are you\"\nstr2 = \"r2d2 where are you\"\n\nprint(str(fuzz.ratio(str1, str2)))" }, { "alpha_fraction": 0.7538461685180664, "alphanum_fraction": 0.7538461685180664, "avg_line_length": 9.833333015441895, "blob_id": "7b710e9caaa36df16887cbdf09346b043422da74", "content_id": "d29aee2342f9fc5bce5407d4d09468a29fb10957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 16, "num_lines": 6, "path": "/tests/scrape.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport os\n\nname = \"Patrick\"\nprint(name)\n" }, { "alpha_fraction": 0.5361567139625549, "alphanum_fraction": 0.5476807951927185, "avg_line_length": 26.99193572998047, "blob_id": "c7be0271262ac2495f006a3f839cec5c8d395568", "content_id": "19f6fe3f4011d6cf24dc5160df71ebd22a0a6c60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3471, "license_type": "no_license", "max_line_length": 78, "num_lines": 124, "path": "/jsonreader.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import json\nfrom pprint import pprint\nfrom fuzzywuzzy import fuzz\nfrom fuzzywuzzy import process\n\n#\n# opening files\n#\n\n# inputs\ntimings = open(\"files/sw4_timings.txt\", \"r\")\nsubs = open(\"files/sw4_fs.txt\", \"r\")\nwith open(\"files/sw4_parsed_script.json\") as json_data:\n d = json.load(json_data)\nwith open(\"files/sw4_parsed_script.json\") as json_data:\n new_d = json.load(json_data)\n\n# outputs\nnew_json1 = open(\"files/sw4_speech.json\", \"w+\")\nnew_json2 = open(\"files/sw4_script.json\", \"w+\")\n\nsub_list = []\n\nfor line in subs:\n sub_list.append(line)\n\n# splitting times into start and end times\ntiming_list = []\nfor line in timings:\n split_time = line.split('-->')\n timing_list.append(split_time)\n\n#\n# ---------- fuzzy string comparison ----------\n#\nsub_index = 0\nsub_len = len(sub_list)\ntime_len = len(timing_list)\nstart_time = ''\nend_time = ''\nsub_text = ''\n\n# for j in d['movie_script']:\n# \tscr_text = j['text']\n# \tstart_index = sub_index\n# \tcurr_ratio = 1\n# \tprev_ratio = 0\n# \tsub_text = \"\"\n\n# \twhile curr_ratio > prev_ratio and sub_index < sub_len:\n# \t\tsub_text = sub_text + sub_list[sub_index]\n# \t\tprev_ratio = curr_ratio\n# \t\tcurr_ratio = fuzz.ratio(sub_text, scr_text)\n# \t\tsub_index = sub_index + 1\n\n# \tsub_index = sub_index - 1\n# \tj['start_time'] = timing_list[start_index][0]\n# \tj['end_time'] = timing_list[sub_index-1][1]\n\n\nfor j in d['movie_script']:\n if j['type'] == 'speech':\n j['start_time'] = ''\n j['end_time'] = ''\n scr_text = j['text']\n start_index = sub_index\n curr_ratio = 1\n prev_ratio = 0\n sub_text = \"\"\n temp_sub_index = sub_index + 1\n\n while curr_ratio > prev_ratio and sub_index < sub_len:\n sub_text = 
sub_text + sub_list[sub_index]\n prev_ratio = curr_ratio\n curr_ratio = fuzz.ratio(sub_text, scr_text)\n sub_index = sub_index + 1\n\n if curr_ratio < 50 and prev_ratio < 50:\n sub_index = temp_sub_index\n else:\n sub_index = sub_index - 1\n st = timing_list[start_index][0]\n st = st.replace(' ', '')\n j['start_time'] = st\n if sub_index == sub_len - 1:\n et = timing_list[sub_index][1]\n et = et.replace(\"\\n\", \"\")\n et = et.replace(\" \", \"\")\n j['end_time'] = et\n else:\n et = timing_list[sub_index - 1][1]\n et = et.replace('\\n', '')\n et = et.replace(' ', '')\n j['end_time'] = et\n\n# dropping all stage direction, locations etc. from json file for time being\nnew_script = []\n\nfor i in d['movie_script']:\n if i['type'] == 'speech':\n temp_text = i['text']\n tt_list = list(temp_text)\n tt_index = 0\n while tt_index < len(tt_list):\n if tt_list[tt_index] == '\\n':\n tt_list[tt_index] = ' '\n elif not tt_list[tt_index].isalnum() and tt_list[tt_index] != ' ':\n tt_list[tt_index] = ''\n tt_index = tt_index + 1\n temp_text = ''.join(tt_list)\n i['text'] = temp_text.lower()\n new_script.append(i)\n\nnew_d['movie_script'] = new_script\n\n# sample_text = d['movie_script'][0]['text']\n# sample_sub = ''\n# while sub_index < 10:\n# \tsample_sub = sample_sub + sub_list[sub_index]\n# \tpprint(fuzz.ratio(sample_text, sample_sub))\n# \tsub_index = sub_index + 1\n\nnew_json1.write(json.dumps(new_d, indent=2))\nnew_json2.write(json.dumps(d, indent=2))\n" }, { "alpha_fraction": 0.5384253263473511, "alphanum_fraction": 0.5419099926948547, "avg_line_length": 28.412742614746094, "blob_id": "38c2622ddcb8f25f8f17448c14bd958194412de3", "content_id": "87cb039dcb6aae1d39a68f489dab2f168eb52896", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10618, "license_type": "no_license", "max_line_length": 106, "num_lines": 361, "path": "/sw4specific/format.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# coding: utf-8\n\nimport random, sys, os, argparse, json, re\nfrom bs4 import BeautifulSoup, Tag, UnicodeDammit\nfrom fuzzywuzzy import fuzz\n\n#\n#\n# ------------------ formatting script file --------------------\n#\n#\n\n# argparser= argparse.ArgumentParser(description='')\n# argparser.add_argument('script_url', metavar='script_url', type=str, nargs='?',\n# help='URL of the webpage containing the movie script')\n\n# args=argparser.parse_args()\n\n\n# # loop until we get a valid script_url\n\n# script_url = ''\n# is_webpage_fetched = False\n# while not is_webpage_fetched:\n# # get the script's URL from the parameters if it was passed\n# if( script_url == '' and args.script_url != None ):\n# script_url = args.script_url\n# else:\n# print('Enter an imsdb.com sript url:')\n\n# script_url = input('--> ')\n\n# try:\n# request = urllib.request.Request(script_url)\n# webpage_bytes = urllib.request.urlopen(request)\n# soup = BeautifulSoup(webpage_bytes, 'lxml')\n# print('Detected encoding is ', soup.original_encoding)\n# is_webpage_fetched = True\n# except urllib.error.URLError as err:\n# print('Caught a URLError while fetching URL:', err)\n# print()\n# pass\n# except ValueError as err:\n# print('Caught a ValueError while fetching URL:', err)\n# print()\n# pass\n# except:\n# print('Caught unrecognized error')\n# raise\n# else:\n# script_text = soup.find(\"pre\")\n\n# # script text identified by pre tag in html\n# # checking for pre tag within first pre tag\n\n# if( script_text.find(\"pre\") ):\n# 
script_text = script_text.find(\"pre\")\n\n# is_webpage_fetched = True\n\n\n\n# # script dict to be serialized as JSON\n# script=dict()\n\n\n# # Insert movie URL into dict\n# script['movie_url'] = request.full_url\n\n# # Insert movie name into dict\n# answer = 'n'\n# while (answer == 'n' or answer == 'N'):\n# script['movie_title'] = input(\"Enter movie title: \")\n\n\n# BLOCK_TYPES=['character', 'speech', 'stage direction', 'location']\n# CHARACTER=0\n# SPEECH=1\n# DIRECTIONS=2\n# LOCATION=3\n\n\n# # COMPILE ALL THE REGULAR EXPRESSIONS!\n# spaces_regex = re.compile(\"^(\\s*).*\")\n# location_regex = re.compile(\"^\\s*(INT\\.|EXT\\.)\")\n\n\n# #\n# # Function for determining and defining block types based on leading spaces\n# #\n# def get_line_type(line, stripped_line, usual_spaces, characters):\n# # Counting the number of spaces at the beginning of the line\n# spmatch = spaces_regex.search(line)\n# spaces_number = len(spmatch.group(1))\n# block_type = 0\n\n# if( location_regex.search(line) != None ):\n# return LOCATION\n\n# if stripped_line in characters:\n# return CHARACTER\n\n# # Look for space\n# for block_type_usual_spaces in usual_spaces:\n# if spaces_number in block_type_usual_spaces:\n# block_type = usual_spaces.index(block_type_usual_spaces)\n# return usual_spaces.index(block_type_usual_spaces)\n\n# print('There are {:d} space(s) at the beginning of this line'.format(spaces_number))\n# question = \"What kind of block is this?\\n\"\n# for i in range(len(BLOCK_TYPES)):\n# question += '\\t('+str(i)+') ' + BLOCK_TYPES[i] + '\\n'\n# print(question)\n\n# validated = False\n# while(validated == False):\n# try:\n# block_type = int(input('? [0-{:d}] '.format(len(BLOCK_TYPES)-1)))\n# while( block_type < 0 or block_type >= len(BLOCK_TYPES)):\n# block_type = int(input('? [0-{:d}] '.format(len(BLOCK_TYPES)-1)))\n# except ValueError:\n# continue\n\n# validated = True\n# answer = input('You said the last block type was \\'{:s}\\', sure about that? (Y/n) '.format(\n# BLOCK_TYPES[block_type]))\n# if( answer == 'n' or answer =='N' ):\n# validated = False\n\n# remember_spaces = False\n# validated = False\n# while( validated == False):\n# answer_spaces = input('Are all lines with {:d} leading spaces \\'{:s}\\' blocks ? (Y/n) '.format(\n# spaces_number, BLOCK_TYPES[block_type]))\n\n# if( answer_spaces == 'n' or answer_spaces =='N' ):\n# print('You said no: we will ask you again next time.')\n# remember_spaces = False\n# else:\n# print('You said yes: ' +\n# 'every new block with {:d} leading spaces '.format(spaces_number) +\n# 'will now be considered a \\'{:s}\\'.'.format(BLOCK_TYPES[block_type]) )\n# remember_spaces = True\n\n# validated = True\n# answer = input('Are you sure? 
(Y/n) ')\n# if( answer == 'n' or answer =='N' ):\n# validated = False\n\n# if( remember_spaces ):\n# usual_spaces[block_type].append(spaces_number)\n\n# return block_type\n\n\n# # Main formatting loop\n\n# usual_spaces=[[] for i in range(len(BLOCK_TYPES))]\n\n# is_intro = True\n# movie_script = []\n# intro = []\n# last_line_type = -1\n# last_character = ''\n# text = []\n# characters=[]\n\n\n# print(\"Start by telling me when the introduction will end.\")\n\n# for block in script_text.descendants:\n# # if a block is an instance of bs4.Tag it's surrounded by HTML tags.\n# # the next block will be the same text without the tags so we continue without parsing this block\n# if(isinstance(block, Tag)):\n# continue\n\n# #converts string to utf-8\n# block = UnicodeDammit(block, soup.original_encoding).unicode_markup\n# # remove leading and ending new line chars\n# block = block.strip('\\n')\n\n# # skip empty blocks of text\n# if( re.search('\\w', block) == None ):\n# continue\n\n# for line in block.split('\\n'):\n# stripped_line = line.strip(' \\n\\t\\r')\n# if( re.search('\\w', line) == None ):\n# continue\n# print()\n# print()\n# print('------------------------------ Begin line ------------------------------')\n# print(line)\n# print(' ------- End line -------')\n# print()\n# print()\n\n# if( is_intro ):\n# print()\n# answer = input(\"Is this still part of the intro? (Y/n) \")\n\n# if(answer == 'n' or answer == 'N'):\n# is_intro = False\n# movie_script.append({\n# 'type': 'introduction',\n# 'text': '\\n'.join(intro)})\n# else:\n# intro.append(stripped_line)\n# continue\n\n\n# line_type = get_line_type(line, stripped_line, usual_spaces, characters)\n# print(\"The last line was interpreted as '{}'\".format(BLOCK_TYPES[line_type]))\n# print()\n\n# if(last_line_type == -1 # -1 = not initialized\n# or last_line_type == line_type):\n# text.append(stripped_line)\n# else:\n# if(last_line_type == CHARACTER):\n# last_character='\\n'.join(text)\n# if not last_character in characters:\n# characters.append(last_character)\n# elif(last_line_type == SPEECH):\n# movie_script.append({\n# 'type': BLOCK_TYPES[last_line_type],\n# BLOCK_TYPES[CHARACTER]: last_character,\n# 'text': '\\n'.join(text)})\n# print('We just parsed this JSON block:')\n# print(movie_script[-1])\n# else:\n# movie_script.append({\n# 'type': BLOCK_TYPES[last_line_type],\n# 'text': '\\n'.join(text)})\n# print('We just parsed this JSON block:')\n# print(movie_script[-1])\n# text=[stripped_line]\n\n# last_line_type = line_type\n# print()\n\n# print()\n# print()\n\n# movie_script.append({\n# 'type': BLOCK_TYPES[line_type],\n# 'text': '\\n'.join(text)})\n\n# print('We just parsed this JSON block:')\n# print(movie_script[-1])\n# print()\n# print()\n\n# script['movie_script'] = movie_script\n\n# print('All done!')\n\n\n# print(flush=True)\n# print(flush=True)\n# print('(Our current directory is: {})'.format(os.getcwd()), flush=True)\n# out_filename = input('Enter output filename: ')\n\n# try:\n# fd = open(out_filename, 'w')\n# json.dump(script, fd, indent=True)\n# print('Bravo!')\n# except:\n# print(\"Shit broke: \", sys.exc_info()[0])\n# finally:\n# fd.close()\n\n\n\n\n\n#\n#\n# --------------------- formatting sub file ---------------------\n#\n#\n\nprint('formatting sub file')\nline_num = 1\ntiming = False\ntimes = []\ntemp = \"\"\nsub = open(\"files/sw4_sub.txt\", \"r\")\nsubout = open(\"files/sw4_fs.txt\", \"w+\")\ntimeout = open(\"files/sw4_timings.txt\", \"w+\")\nformatted_subs = []\n\nfor l in sub:\n print(l)\n num = str(line_num)\n\n # 
stripping line of all characters that could mess up string comparisons\n # i.e. all non alphanumeric characters\n x = list(l)\n y = []\n for c in x:\n \tif not c.isalnum() and c != ' ':\n \t\ty.append('')\n \telse:\n \t\ty.append(c)\n x = ''.join(y)\n\n if x == num:\n if len(temp) > 0:\n temp_list = list(temp)\n char_index = 0\n\n # cleaning up line\n while char_index < len(temp_list):\n if not temp_list[char_index].isalnum() and temp_list[char_index] != ' ':\n temp_list[char_index] = ''\n char_index = char_index + 1\n temp = ''.join(temp_list)\n temp = temp.lower()\n temp = temp.strip('\\n\\t\\r')\n formatted_subs.append(temp)\n line_num = line_num + 1\n timing = True\n temp = \"\"\n\n # separating times from subtitles for easier parsing\n elif timing:\n times.append(l)\n timing = False\n # app\n else:\n listl = list(l)\n len_listl = len(listl)\n if (listl[len_listl - 1] == \"\\n\"):\n listl[len_listl - 1] = ' '\n new_l = ''.join(listl)\n temp = temp + new_l\n\n# Appending final line as for loop appends when it reaches next index line\nformatted_subs.append(temp)\n\n# Cleaning up times: removing new line chars\nnew_times = []\nfor line in times:\n line_array = list(line)\n ll = len(line_array)\n if line_array[ll - 1] == \"\\n\":\n line_array[ll - 1] = ''\n elif line_array[0] == \"\\n\":\n line_array[0] = ''\n new_line = ''.join(line_array)\n new_times.append(new_line)\n\n# ---------- writing formatted files ----------\nfor line in new_times:\n timeout.write(line + '\\n')\nfor line in formatted_subs:\n subout.write(line + '\\n')\n\nsub.close()\n" }, { "alpha_fraction": 0.5379976034164429, "alphanum_fraction": 0.5464414954185486, "avg_line_length": 23.8700008392334, "blob_id": "1f0921eb304b990d21fbc8c97106c9a83530513e", "content_id": "711e9f652942094821d6b0728e89eef0febd7688", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2487, "license_type": "no_license", "max_line_length": 95, "num_lines": 100, "path": "/format.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n# coding: utf-8\n\nimport random, sys, os, argparse, json, re\n# from bs4 import BeautifulSoup, Tag, UnicodeDammit\nfrom fuzzywuzzy import fuzz\n\n\n#\n#\n# --------------------- formatting sub file ---------------------\n#\n#\n\n# print('formatting sub file')\nline_num = 1\ntiming = False\ntimes = []\ntemp = \"\"\nsub = open(\"files/sw4_sub.txt\", \"r\")\nsubout = open(\"files/sw4_fs.txt\", \"w+\")\ntimeout = open(\"files/sw4_timings.txt\", \"w+\")\nformatted_subs = []\n\nfor l in sub:\n # print(l)\n num = str(line_num)\n x = l.lower()\n x = l.strip('\\n\\t\\r')\n if x == num:\n if len(temp) > 0:\n formatted_subs.append(temp)\n line_num = line_num + 1\n timing = True\n temp = \"\"\n\n # separating times from subtitles for easier parsing\n elif timing:\n times.append(l)\n timing = False\n # app\n else:\n listl = list(l)\n len_listl = len(listl)-1\n\n # cleaning up line\n while len_listl >= 0:\n if len_listl == 0 and listl[len_listl] == '-':\n # print('this worked')\n listl[len_listl] = '-'\n elif not listl[len_listl].isalnum() and listl[len_listl] != ' ':\n listl[len_listl] = ''\n len_listl = len_listl - 1\n new_l = ''.join(listl)\n new_l = new_l.lower()\n new_l = new_l.strip('\\n\\t\\r')\n temp = temp + new_l\n\n# Appending final line as for loop appends when it reaches next index line\nformatted_subs.append(temp)\n\n# Cleaning up times: removing new line chars\nnew_times = []\nfor line in times:\n line_array = 
list(line)\n ll = len(line_array)\n if line_array[ll - 1] == \"\\n\":\n line_array[ll - 1] = ''\n elif line_array[0] == \"\\n\":\n line_array[0] = ''\n new_line = ''.join(line_array)\n new_times.append(new_line)\n\n# passing trhough a second time to account for lines which need to be split into multiple lines\n\nl = len(formatted_subs)\ni = 0\nnew_fs = []\nnew_t = []\n\nwhile i < l:\n if '-' not in formatted_subs[i]:\n new_fs.append(formatted_subs[i])\n new_t.append(new_times[i])\n else:\n item = formatted_subs[i]\n item_list = item.split('-')\n new_fs.append(item_list[1])\n new_fs.append(item_list[2])\n new_t.append(new_times[i])\n new_t.append(new_times[i])\n i += 1\n\n# ---------- writing formatted files ----------\nfor line in new_t:\n timeout.write(line + '\\n')\nfor line in new_fs:\n subout.write(line + '\\n')\n\nsub.close()\n" }, { "alpha_fraction": 0.641791045665741, "alphanum_fraction": 0.641791045665741, "avg_line_length": 16, "blob_id": "c081c15d06d5b11ee44eaf069fa22cb66bfe0fd1", "content_id": "609269f1856c40f3bcbb687ac1566aacebfb9a4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 67, "license_type": "no_license", "max_line_length": 31, "num_lines": 4, "path": "/tests/subtest.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "sub = open(\"shortsub.txt\", \"r\")\n\nfor line in sub:\n\tprint(len(line))" }, { "alpha_fraction": 0.5514222979545593, "alphanum_fraction": 0.5711159706115723, "avg_line_length": 26.696969985961914, "blob_id": "e07fe5a432c38c5c4aaf612ede3d8e363d1e973e", "content_id": "8f338ced443e0828786a8b1d989695fb0f407e99", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 914, "license_type": "no_license", "max_line_length": 51, "num_lines": 33, "path": "/sw4specific/replace.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import json\n\nwith open(\"../files/sw4_script.json\") as json_data:\n scr = json.load(json_data)\n\nwith open(\"../files/sw4_speech.json\") as json_data:\n sp = json.load(json_data)\n\nfor item in scr['movie_script']:\n t = item['text']\n t = t.replace('artoo', 'r2')\n t = t.replace('detoo', 'd2')\n t = t.replace('Artoo', 'r2')\n t = t.replace('Detoo', 'd2')\n t = t.replace('seethreepio', 'c-3po')\n t = t.replace('threepio', '3po')\n item['text'] = t\n\nfor item in sp['movie_script']:\n t = item['text']\n t = t.replace('artoo', 'r2')\n t = t.replace('detoo', 'd2')\n t = t.replace('Artoo', 'r2')\n t = t.replace('Detoo', 'd2')\n t = t.replace('seethreepio', 'c-3po')\n t = t.replace('threepio', '3po')\n item['text'] = t\n\nnscr = open(\"files/new_sw4_scr.json\", \"w+\")\nnsp = open(\"files/new_sw4_sp.json\", \"w+\")\n\nnscr.write(json.dumps(scr, indent=2))\nnsp.write(json.dumps(sp, indent=2))\n" }, { "alpha_fraction": 0.5765661001205444, "alphanum_fraction": 0.5893271565437317, "avg_line_length": 27.733333587646484, "blob_id": "aa13d3325ad56e3ab391bcc91e99e111783b3aaa", "content_id": "894fdff0942c927491df2c39f5932a6e6f5f8617", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 862, "license_type": "no_license", "max_line_length": 113, "num_lines": 30, "path": "/hitcomparison.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import json\n\nwith open(\"files/sw4_speech.json\") as j_data:\n orgnl = json.load(j_data)\n\nwith open(\"sw4specific/files/sw4_speech.json\") as 
json_data:\n new = json.load(json_data)\n\no_hit_count = 0\nn_hit_count = 0\no_total = 0\nn_total = 0\n\nfor item in orgnl['movie_script']:\n o_total += 1\n if item['start_time'] != '':\n o_hit_count += 1\n\nfor item in new['movie_script']:\n n_total += 1\n if item['start_time'] != '':\n n_hit_count += 1\n\no_hit_ratio = float(o_hit_count) / float(o_total)\nn_hit_ratio = float(n_hit_count) / float(n_total)\n\nprint('original hit count was: ' + str(o_hit_count) + \", giving a hit ratio of: \" + str(o_hit_count) + \"/\" + str(\n o_total) + \"=\" + str(o_hit_ratio))\nprint('new hit count was: ' + str(n_hit_count) + \", giving a hit ratio of: \" + str(n_hit_count) + \"/\" + str(\n n_total) + \"=\" + str(n_hit_ratio))\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6808176040649414, "avg_line_length": 19.54838752746582, "blob_id": "222ccea6ed63d15aa9161a4cd3810b303e3afa78", "content_id": "aa89c2fbf1b1727dc2804c5a1805340743856771", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 636, "license_type": "no_license", "max_line_length": 85, "num_lines": 31, "path": "/tests/editdistancetest.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport os\nimport editdistance\nfrom fuzzywuzzy import fuzz\n\n# scr = open(\"sw4script.txt\", \"r\")\n# sub = open(\"subtrack.txt\", \"r\")\n#\n# lines = []\n# lineType = []\n# prevLine = ''\n#\n#\n# for line in scr:\n# print(str(editdistance.eval(line, prevLine)))\n# prevLine = line\n\nline1 = 'Tear this ship apart and bring me the passengers, I want them alive'\nline2 = 'Trooper, tear this ship apart and bring me the ambassador, I want her alive'\n\nline3 = 'Goodbye pal'\nline4 = 'Hello Luke'\n\nfuzz = fuzz.ratio(line3, line4)\n\ned = editdistance.eval(line3, line4)\nprint(str(ed))\nprint(str(fuzz))\n# scr.close()\n# sub.close()" }, { "alpha_fraction": 0.5773955583572388, "alphanum_fraction": 0.6167076230049133, "avg_line_length": 33.91428756713867, "blob_id": "ff674a29740b678469f822a8ca9e86facfbf7850", "content_id": "a6f8f313dbbca177fc457757d4092c103d07b2fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1221, "license_type": "no_license", "max_line_length": 216, "num_lines": 35, "path": "/sw4specific/csvParser.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import json, csv\nfrom pprint import pprint\n\n#opening files\nlocation_data = open(\"files/lll_location_data.csv\", \"w+\")\ncsv_writer = csv.writer(location_data)\nwith open(\"files/lll_script.json\") as json_data:\n\td = json.load(json_data)\nlines = []\nlines.append(['character', 'text', 'start_time', 'end_time', 'location', 'time in ms'])\n\nfor item in d['movie_script']:\n\tif item['type'] == 'location':\n\t\tloc_arr = item['text'].split('-')\n\t\ttemp = loc_arr[0]\n\t\ttemp = temp.strip('EXT.')\n\t\ttemp = temp.strip('INT.')\n\t\tcurr_location = temp\n\t\ttemp_arr = loc_arr[1:]\n\t\tsub_loc = ''.join(temp_arr)\n\telif item['type'] == 'speech' and item['start_time'] != '':\n\t\tstart_time = item['start_time']\n\t\tend_time = item['end_time']\n\n\t\tst_arr = start_time.split(':')\n\t\tet_arr = end_time.split(':')\n\t\ttemp_st = st_arr[2].split(',')\n\t\ttemp_et = et_arr[2].split(',')\n\n\t\ttime_in_ms = ((int(et_arr[0])*60*60*1000) + (int(et_arr[1])*60*1000) + (int(temp_et[0])*1000) + int(temp_et[1])) - ((int(st_arr[0])*60*60*1000) + (int(st_arr[1])*60*1000) + 
(int(temp_st[0])*1000) + int(temp_st[1]))\n\n\t\tline = [item['character'], item['text'], item['start_time'], item['end_time'], curr_location, str(time_in_ms)]\n\t\tlines.append(line)\n\ncsv_writer.writerows(lines)" }, { "alpha_fraction": 0.5690039992332458, "alphanum_fraction": 0.5739169120788574, "avg_line_length": 19.740739822387695, "blob_id": "228af2fec316985d46f248dd387c5554e1bc3c40", "content_id": "2b54dd6c872245fcab1563272936f5761c263274", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2239, "license_type": "no_license", "max_line_length": 93, "num_lines": 108, "path": "/tests/editdistancetests.py", "repo_name": "patrickberry1/FinalYearProject", "src_encoding": "UTF-8", "text": "import random\nimport sys\nimport os\nfrom fuzzywuzzy import fuzz\n\nscr = open(\"shortscript.txt\", \"r\")\nsub = open(\"shortsub.txt\", \"r\")\nsubout = open(\"formatted_subs.txt\", \"w+\")\nscrout = open(\"formatted_scr.txt\", \"w+\")\n\nfmtdScr = []\nfmtdSubs = []\n\n##\n##formatting script file\n##\nfor scrLine in scr:\n\n i = 0\n scrList = list(scrLine)\n inSent = False\n inPars = False\n\n ##remove non-alphanumeric characters from lines in script\n ##and remove text within parentheses\n while i < len(scrList):\n if not scrList[i].isalnum():\n \tif scrList[i] == '(':\n \t\tinPars = True\n \t\tscrList[i] = ''\n \telif scrList == ')':\n \t\tinPars = False\n \t\tscrList[i] = ''\n \telif scrList[i] == ' ' and inSent:\n \t\tscrList[i] = scrList[i]\n \telse:\n \t\tscrList[i] = ''\n else:\n \tinSent = True\n \tif inPars:\n \t\tscrList[i] = ''\n i = i+1\n\n ##converting list back into string and removing white space\n ##formatting headers\n scrLine = \"\".join(scrList)\n if(scrLine.isupper()):\n scrLine = \">>>>>\" + scrLine\n else:\n scrLine = scrLine.lower()\n\n fslen = len(fmtdScr)\n\n ##adding tags and appending individual formatted lines together\n if fslen == 0:\n fmtdScr.append(\">>>>>START\")\n fmtdScr.append(scrLine)\n elif not fmtdScr[fslen-2].isupper() and fmtdScr[fslen-1] == \"\" and not scrLine.isupper():\n \tfmtdScr.append(\">>>>>DESC\")\n fmtdScr.append(scrLine)\n else:\n fmtdScr.append(scrLine)\n\n##\n##formatting sub file\n##\nskip = 0\nx = 1\ntiming = False\ntimes = []\ntemp = \"\"\n\nfor l in sub:\n\tnum = str(x) + '\\n'\n\tif l == num:\n\t\tind = 0\n\t\ttemp = temp.lower()\n\t\ttempList = list(temp)\n\t\twhile ind < len(tempList):\n\t\t\tif not tempList[ind].isalnum() and tempList[ind] != ' ':\n\t\t\t\ttempList[ind] = ''\n\t\t\tind = ind + 1\n\t\tif len(tempList) > 0:\n\t\t\tfmtdSubs.append(''.join(tempList))\n\t\tx = x+1\n\t\ttiming = True\n\t\ttemp = \"\"\n\telif timing:\n\t\ttimes.append(l)\n\t\ttiming = False\n\telse:\n\t\ttemp = temp + l\n\n\nfor line in times:\n\tline = line.split(\"-->\")\n\tprint(line)\n\n##\n##printing formatted files\n##\nfor line in fmtdSubs:\n subout.write(line + '\\n')\nfor line in fmtdScr:\n\tscrout.write(line + '\\n')\n\nscr.close()\nsub.close()" } ]
11
vgrangep/cooperative-agents-simulation
https://github.com/vgrangep/cooperative-agents-simulation
bd045574bfa193475296a8b6b45c7cdbbcb0fa87
364bf4d89dfc4000ed45c1e44e86f253f986030a
584a9cff985dc2ea2e2cec26963675958f9aa1cd
refs/heads/main
2023-05-31T15:44:52.640968
2021-06-16T17:10:58
2021-06-16T17:10:58
377,572,355
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5519256591796875, "alphanum_fraction": 0.5535192489624023, "avg_line_length": 31.179487228393555, "blob_id": "3d4f3e9006e8eb1d548280bb94870815b52513d4", "content_id": "c043a37671dd92db04cdfeac59d627917ffe6d23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3765, "license_type": "no_license", "max_line_length": 186, "num_lines": 117, "path": "/agent.py", "repo_name": "vgrangep/cooperative-agents-simulation", "src_encoding": "UTF-8", "text": "import random\n\n\nclass Agent:\n \"\"\"\n Notes\n ----------\n There is a really big assumption in this simulation. Agents have no idea when the game will end. Therefore, they can not modify their strategy based on the number games left to play.\n The score increment is calculated based on a combination of our answer and the answer of the other agent.\n Table format :\n (our answer, other agent's answer) : our score increment\n cooperate : True (T), betray : False (F)\n (T,T) : 1\n (T,F) : 20\n (F,T) : 0\n (F,F) : 5\n \"\"\"\n\n def __init__(self, identifier, description=\"unamed agent\"):\n \"\"\"\n Parameters\n ----------\n id : int\n Our identifier\n description : String\n Short description\n agent_id: int\n Id of the agent that is currently interracting with us\n cooperate : boolean\n out current answer (True : cooperate, False: betray)\n memory : dictionary\n past interractions with the other agents. (dictionnary whose key is the agent id)\n score : array of int\n our score increment, updated after each round\n \"\"\"\n self.id = identifier\n self.description = description\n self.agent_id = None\n self.cooperate = True\n self.memory = {}\n self.score = []\n\n def __str__(self):\n return 'ID='+str(self.id)+'; '+self.description\n\n def decide(self):\n \"\"\"\n logic for decision\n The only known information at that time is the Agent with whom we interract\n and our memory of our past interractions with them\n \"\"\"\n ###########################################################################\n # Add you logic here\n # for example :\n # - always cooperate\n # - always betray\n # - randomly choose\n # - betray if other agent has betrayed you in the past\n # - cooperate by default, but betray if the agent betrayed you last time.\n self.cooperate = True\n ###########################################################################\n\n def initiate_interraction(self, agent_id):\n self.agent_id = agent_id\n\n def update_memory(self, agent_decision):\n \"\"\"\n update our memory with the last interraction we have had with a specific agent.\n store both our answer and theirs, stored as a list, in an array : (their answer, our answer)\n \"\"\"\n last_memory = (agent_decision, self.cooperate)\n if self.agent_id in self.memory:\n self.memory[self.agent_id].append(last_memory)\n else:\n self.memory[self.agent_id] = [last_memory]\n\n def update_score(self, delta):\n \"\"\"\n Parameters\n ----------\n delta : int\n our last round score increase\n \"\"\"\n self.score.append(delta)\n\n\nclass CollaborativeAgent(Agent):\n def decide(self):\n self.cooperate = True\n\n\nclass ReciprocalAgent(Agent):\n def decide(self):\n self.cooperate = True\n if self.agent_id in self.memory:\n self.cooperate = self.memory[self.agent_id][-1]\n\n\nclass TraitorAgent(Agent):\n def decide(self):\n self.cooperate = False\n if self.agent_id in self.memory:\n if False in self.memory[self.agent_id]:\n self.cooperate = False\n\n\nclass UnforgivingAgent(Agent):\n def decide(self):\n self.cooperate = True\n if 
self.agent_id in self.memory:\n if False in self.memory[self.agent_id]:\n self.cooperate = False\n\n\nclass ChaoticAgent(Agent):\n def decide(self):\n self.cooperate = random.choice((True, False))\n" }, { "alpha_fraction": 0.5127221941947937, "alphanum_fraction": 0.5346810817718506, "avg_line_length": 31.23595428466797, "blob_id": "ca099d8457d07d288cd063a7700fb94de622e090", "content_id": "7f7900134f3c0cb7c947e9f3ffce7c42a6d25249", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2869, "license_type": "no_license", "max_line_length": 126, "num_lines": 89, "path": "/app.py", "repo_name": "vgrangep/cooperative-agents-simulation", "src_encoding": "UTF-8", "text": "from agent import *\nimport random\n\n\ndef display_results(agents):\n for ag in agents:\n print(sum(ag.score))\n\n\ndef pair_agents(agents, verbose=False):\n random_agents = list(agents)\n random.shuffle(random_agents)\n\n pairings = list(zip(random_agents[:len(random_agents)//2],\n random_agents[len(random_agents)//2:]\n ))\n if verbose:\n for p in pairings:\n print(\"Pair : \" + str(p[0]) + \"|\" + str(p[1]))\n return pairings\n\n\nif __name__ == \"__main__\":\n verbose = False\n agents = []\n\n agents.append(ReciprocalAgent(0, \"ReciprocalAgent\"))\n agents.append(ReciprocalAgent(1, \"ReciprocalAgent\"))\n agents.append(ReciprocalAgent(2, \"ReciprocalAgent\"))\n\n agents.append(CollaborativeAgent(3, \"CollaborativeAgent\"))\n agents.append(CollaborativeAgent(4, \"CollaborativeAgent\"))\n agents.append(CollaborativeAgent(5, \"CollaborativeAgent\"))\n agents.append(CollaborativeAgent(9, \"CollaborativeAgent\"))\n\n agents.append(TraitorAgent(6, \"TraitorAgent\"))\n agents.append(TraitorAgent(7, \"TraitorAgent\"))\n agents.append(TraitorAgent(8, \"TraitorAgent\"))\n\n nb_rounds = 10000 # random.range(100)\n\n if verbose:\n print(\"nb of rounds\", nb_rounds)\n for r in range(nb_rounds):\n pairings = pair_agents(agents, verbose)\n\n for pair in pairings:\n # Evaluate stretegie\n pair[0].initiate_interraction(pair[1])\n pair[0].decide()\n pair[1].initiate_interraction(pair[0])\n pair[1].decide()\n\n # resolve match\n \"\"\" Table format :\n (our answer, other agent's answer) : our score increment\n cooperate : True (T), betray : False (F)\n (T,T) : 1\n (T,F) : 20\n (F,T) : 0\n (F,F) : 5 \"\"\"\n a = pair[0].cooperate\n b = pair[1].cooperate\n\n if a and b:\n pair[0].update_score(1)\n pair[1].update_score(1)\n if not a and not b:\n pair[0].update_score(5)\n pair[1].update_score(5)\n if not a and b:\n pair[0].update_score(0)\n pair[1].update_score(20)\n if a and not b:\n pair[0].update_score(20)\n pair[1].update_score(0)\n\n if verbose:\n print(\"({ad},{bd}): ({a},{b})\".format(\n ad=pair[0].description + \"_\" + str(pair[0].id), bd=pair[1].description + \"_\" + str(pair[1].id), a=a, b=b))\n print(\"update is a:{a}, b:{b}\".format(\n a=pair[0].score[-1], b=pair[1].score[-1]))\n # Update memories\n pair[0].update_memory(b)\n pair[1].update_memory(a)\n\n # display_results(agents)\n result = [sum(i.score) / nb_rounds for i in agents]\n print(result)\n" } ]
2
Ymin-dgnn/MyFirstDeepLearning
https://github.com/Ymin-dgnn/MyFirstDeepLearning
e006dd03f9b1ea1bfeb5bc829c9172ac075075b2
a5850edd9fcdce52fd2de2d96fd4281b0b4de1a4
9660e1f9153924033ea53a03b9da2eb83ce4394c
refs/heads/master
2021-01-01T12:54:35.097000
2020-09-07T05:22:21
2020-09-07T05:22:21
239,288,394
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7853107452392578, "alphanum_fraction": 0.8192090392112732, "avg_line_length": 49.57143020629883, "blob_id": "87c64b596a51937bf943888223c8b1d62d6fef0d", "content_id": "2340efee2ee9acf0cc851ecfd2034fe999b41a18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 354, "license_type": "no_license", "max_line_length": 132, "num_lines": 7, "path": "/README.md", "repo_name": "Ymin-dgnn/MyFirstDeepLearning", "src_encoding": "UTF-8", "text": "# MyFirstDeepLearning\nThis is my first step in Deep Learning \n\nI studied Deep Learning with sungkim's class https://www.youtube.com/watch?v=BS6O0zOGX4E&amp;list=PLlMkM4tgfjnLSOjrEJN31gZATbcj_MpUm\n\nLater I heard that Tensorflow is harder. So, I started learning Pytorch\nhttps://www.youtube.com/watch?v=SKq-pmkekTk&list=PLlMkM4tgfjnJ3I-dbhO9JTw7gNty6o_2m\n" }, { "alpha_fraction": 0.5457875728607178, "alphanum_fraction": 0.5888278484344482, "avg_line_length": 25.634145736694336, "blob_id": "b321d514aafe2a93bb42969fcdb96a53b5f16202", "content_id": "945c5fa4f36713b5678f0d3a38f0c1d8c71b1996", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1092, "license_type": "no_license", "max_line_length": 74, "num_lines": 41, "path": "/pytorch/SungKimPytorch4_Ex5.py", "repo_name": "Ymin-dgnn/MyFirstDeepLearning", "src_encoding": "UTF-8", "text": "import torch\nfrom torch.autograd import Variable\n\nx_data = [1.0, 2.0, 3.0]\ny_data = [2.0, 4.0, 6.0]\n\nw_1 = Variable(torch.Tensor([1.0]), require_grad=True) # Any random value\nw_2 = Variable(torch.Tensor([1.0]), require_grad=True)\n# require_grad : I need to cumpute gradient\n\n\ndef forward(x):\n return x * x * w_2 + x * w_1\n\n\ndef loss(x, y):\n y_pred = forward(x)\n return (y_pred - y) * (y_pred - y)\n\n\nprint(\"pridict (before training)\", 4, forward(4).data[0])\n\nfor epoch in range(10):\n for x_val, y_val in zip(x_data, y_data):\n l = loss(x_val, y_val)\n l.backward()\n print(\"\\tgrad: \", x_val, y_val, w_1.grad.data[0])\n w_1.data = w_1.data - 0.01 * w_1.grad.data\n\n # Manually zero the gradients after updating weights\n w_1.grad.data.zero_()\n\n print(\"\\tgrad: \", x_val, y_val, w_2.grad.data[0])\n w_2.data = w_2.data - 0.01 * w_2.grad.data\n\n # Manually zero the gradients after updating weights\n w_2.grad.data.zero_()\n\n print(\"progress:\", epoch, l.data[0])\n\nprint(\"pridict (after training)\", 4, forward(4).data[0])\n" } ]
2
slamminsammya/Prolog-99
https://github.com/slamminsammya/Prolog-99
fb21a152dc2846cad6185452792e5aecc12934f9
7417d4eae178d69633822235e3064cc6a22b22af
1ad02d2f842cdaa9ecbeb2dba0270ad7fb1a9876
refs/heads/master
2021-04-30T06:33:17.472768
2018-02-13T23:14:15
2018-02-13T23:14:15
121,447,914
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5404821634292603, "alphanum_fraction": 0.554730236530304, "avg_line_length": 22.3789119720459, "blob_id": "ce36d36f6cfcf84415f79038d1312efa6a751710", "content_id": "0ef0cdd1e16f30547cfe106f0e158fb9a9be7db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14809, "license_type": "no_license", "max_line_length": 88, "num_lines": 607, "path": "/ninety_nine_problems.py", "repo_name": "slamminsammya/Prolog-99", "src_encoding": "UTF-8", "text": "import random\r\nimport math\r\nimport time\r\n\r\n## Ninety-nine prolog problems. Meant to practice basic problem solving.\r\n## 1) Find the last element of a list\r\n\r\ndef get_last(ell):\r\n return ell[-1]\r\n\r\n## 2) Find the second to last element of a list\r\n\r\ndef second_last(ell):\r\n return ell[-2]\r\n\r\n## 3) Find the k'th element of a list, where first element is 1'st.\r\n\r\ndef kth_element(ell, k):\r\n return ell[k - 1]\r\n\r\n## 4) Find the number of elements in a list.\r\n\r\ndef num_elements(ell):\r\n return len(ell)\r\n\r\n## 5) Reverse a list (python has built in list reversal but\r\n## we gonna implement it ourselves.\r\n\r\ndef reverse(ell):\r\n \r\n reversed_list = [ell[j] for j in range( -1, -len(ell) - 1)]\r\n return reversed_list\r\n\r\n## 6) Find out if a list is a palindrome.\r\n\r\ndef palindrome(ell):\r\n \r\n reverse_ell = reverse(ell)\r\n for j in range(len(ell)):\r\n if ell[j] != reverse_ell[j]:\r\n return False\r\n return True\r\n\r\n## 7) Flatten a nested list structure.\r\n\r\ndef flatten(ell):\r\n \r\n if type(ell[0]) == list:\r\n first_part = flatten(ell[0])\r\n\r\n else:\r\n first_part = ell[0]\r\n\r\n return first_part + flatten(ell[1:])\r\n\r\n## 8) Eliminate consecutive duplicates of list elements\r\n\r\ndef compress(ell):\r\n\r\n for j in range(len(ell) - 1):\r\n if ell[j] == ell[j + 1]:\r\n new_list = ell[ :j + 1] + ell[j + 2: ]\r\n return compress(new_list)\r\n return ell\r\n\r\n## 9) Pack consecutive duplicates of list elements into sublists.\r\n\r\ndef pack(ell):\r\n\r\n output = []\r\n counter = 1\r\n \r\n for j in range(len(ell) - 1):\r\n \r\n if ell[j] != ell[j + 1]:\r\n block = counter * [ ell[j] ]\r\n output.append(block)\r\n counter = 1\r\n \r\n else:\r\n counter += 1\r\n \r\n last_block = counter * [ ell[-1] ]\r\n output.append(last_block)\r\n\r\n return output\r\n \r\n## 10) Run-length encoding of a list.\r\n\r\ndef encode(ell):\r\n packed_list = pack(ell)\r\n \r\n for j in range(len(packed_list)):\r\n multiplicity = len(packed_list[j])\r\n element = packed_list[j][0]\r\n packed_list[j] = [multiplicity, element]\r\n \r\n return packed_list\r\n\r\n## 11) Modify run-length encoding.\r\n\r\ndef modified_encode(ell):\r\n\r\n encoded = encode(ell)\r\n \r\n for j in range(len(encoded)):\r\n multiplicity = encoded[j][0]\r\n \r\n if multiplicity == 1:\r\n encoded[j] = encoded[j][1]\r\n \r\n return encoded\r\n\r\n## 12) Decode a run-length enccoded list as in problem 11.\r\n\r\ndef decode(encoded):\r\n\r\n output = []\r\n\r\n for j in range(len(encoded)):\r\n if type(encoded[j]) == list:\r\n multiplicity = encoded[j][0]\r\n element = encoded[j][1]\r\n chunk = multiplicity * [element]\r\n\r\n else:\r\n chunk = [ encoded[j] ]\r\n output += chunk\r\n\r\n return output\r\n\r\n## 13) Run-length encoding of a list (direct solution)\r\n\r\ndef alternate_encode(ell):\r\n\r\n output = []\r\n counter = 1\r\n \r\n for j in range(len(ell) - 1):\r\n \r\n if ell[j] != ell[j + 1]:\r\n \r\n if counter == 1:\r\n block = ell[j]\r\n \r\n 
else:\r\n block = [counter, ell[j]]\r\n \r\n output.append(block)\r\n counter = 1\r\n \r\n else:\r\n counter += 1\r\n \r\n if counter == 1:\r\n last_block = ell[-1]\r\n \r\n else:\r\n last_block = [counter, ell[-1]]\r\n \r\n output.append(last_block)\r\n\r\n return output\r\n\r\n## 14) Duplicate the elements of a list.\r\n\r\ndef duplicate(ell):\r\n\r\n ouptut = []\r\n \r\n for x in ell:\r\n output += [x, x]\r\n \r\n return output\r\n\r\n## 15) Duplicate the elements of a list a given number of times.\r\n\r\ndef duplicate(ell, n):\r\n\r\n output = []\r\n \r\n for x in ell:\r\n chunk = n * [x]\r\n output += chunk\r\n\r\n return output\r\n\r\n## 16) Drop every n'th element of a list.\r\n\r\ndef drop(ell, n):\r\n \r\n if n > len(ell):\r\n return ell\r\n\r\n else:\r\n return ell[ :n - 1] + drop(ell[n: ], n)\r\n\r\n## 17) Split a list into two parts, the length of the first part is given.\r\n\r\ndef split(ell, k):\r\n\r\n return [ell[:k], ell[k:]]\r\n\r\n## 18) Extract a slice from a list. I.e. a list containing i'th element of\r\n## ell up to the j'th element, inclusive. Here we start counting at 1.\r\n\r\ndef interval(ell, i, j):\r\n return ell[i - 1: j]\r\n\r\n## 19) Rotate a list n places to the left.\r\n\r\ndef left_rotate(ell, n):\r\n return ell[n - 1:] + ell[: n - 1]\r\n\r\n## 20) Remove the k'th element from a list. (Solve without using built in\r\n## functions where possible.)\r\n\r\ndef remove(ell, k):\r\n return ell[: k - 1] + ell[k:]\r\n\r\n## 21) Insert an element at a given position in a list.\r\n\r\ndef insert(ell, element, k):\r\n return ell[: k - 1] + [element] + ell[k - 1: ]\r\n\r\n## 22) Create a list containing all integers within a given range.\r\n\r\ndef make_range(j,k):\r\n number = j\r\n ell = []\r\n \r\n while number <= k:\r\n ell.append(number)\r\n number += 1\r\n\r\n return ell\r\n\r\n## 23) Extract a given number of randomly selected elements from a list.\r\n\r\ndef extract_randomly(ell, n):\r\n\r\n output = []\r\n count = n\r\n current_list = ell\r\n \r\n while count > 0:\r\n random_index = random.randrange(len(ell))\r\n \r\n output.append(ell[random_index])\r\n ell = remove(ell, random_index + 1)\r\n ## We remove at random_index + 1 since the problem uses a different\r\n ## counting convention than python, starting at one, and the remove \r\n ## function follows this convention for its argument.\r\n count = count - 1\r\n\r\n return output\r\n\r\n## 24) Draw N different random numbers from the interval 1,..., M\r\n\r\ndef lotto(tickets, m):\r\n interval = make_range(1, m)\r\n \r\n if n <= m:\r\n return extract_randomly(interval, n)\r\n \r\n else:\r\n print 'Invalid arguments. Number of choices cannot exceed size of interval.'\r\n\r\n## 25) Generate a random permutation of the elements of a list.\r\n\r\ndef random_perm(ell):\r\n return extract_randomly(ell, len(ell))\r\n\r\n## 26) Generate all combinations of k distinct objects from n distinct objects.\r\n\r\ndef combinations(ell, k):\r\n\r\n if k == 1:\r\n singletons = [ [x] for x in ell]\r\n return singletons\r\n\r\n elif k > len(ell):\r\n return []\r\n\r\n else:\r\n first_element = ell[0]\r\n do_contain = [ [first_element] + x for x in combinations(ell[1:], k - 1) ]\r\n dont_contain = combinations(ell[1:], k)\r\n return do_contain + dont_contain\r\n \r\n pass\r\n\r\n## 27) Group the elements of a set into disjoint subsets. (Partition problem)\r\n## Here we do care about the order of the partition. 
A | B different than\r\n## B | A\r\n\r\ndef partition(ell, k):\r\n\r\n if k == 1:\r\n return [ell]\r\n\r\n elif k > len(ell):\r\n return []\r\n\r\n else:\r\n partitions = []\r\n \r\n for size in range(1, len(ell) - (k - 1) + 1):\r\n first_subgroups = combinations(ell, size)\r\n \r\n for first in first_subgroups:\r\n remaining_list = [elements for elements in ell if elements not in first]\r\n remaining_partitions = partition(remaining_list, k - 1)\r\n resulting_partitions = [ [first] + [x] for x in remaining_partitions]\r\n partitions += resulting_partitions\r\n \r\n return partitions\r\n\r\n pass\r\n \r\n \r\n## 28) Sorting a list of lists according to length of sublists, short to long.\r\n## As a second version, sort them according to their length frequency.\r\n\r\ndef list_sort_1(ell):\r\n\r\n ## Implement a merge sort, but compare lengths of elements.\r\n \r\n if len(ell) == 1:\r\n return ell\r\n \r\n elif len(ell) == 2:\r\n first = ell[0]\r\n second = ell[1]\r\n if len(first) > len(second):\r\n return [ell[1], ell[0]]\r\n else:\r\n return ell\r\n\r\n else:\r\n midpoint = len(ell) / 2\r\n sublist_1 = ell[:midpoint]\r\n sublist_2 = ell[midpoint:]\r\n sorted_sublist_1 = list_sort_1(sublist_1)\r\n sorted_sublist_2 = list_sort_1(sublist_2)\r\n merged_list = []\r\n \r\n while len(merged_list) < len(ell):\r\n\r\n if len(sorted_sublist_1) == 0:\r\n merged_list += sorted_sublist_2\r\n\r\n elif len(sorted_sublist_2) == 0:\r\n merged_list += sorted_sublist_1\r\n\r\n elif len(sorted_sublist_1[0]) < len(sorted_sublist_2[0]):\r\n merged_list.append(sorted_sublist_1.pop(0))\r\n\r\n else:\r\n merged_list.append(sorted_sublist_2.pop(0))\r\n\r\n return merged_list\r\n\r\n## Now implement a quicksort, using left pivot.\r\n\r\ndef list_sort_2(ell):\r\n\r\n if len(ell) == 0 or len(ell) == 1:\r\n return ell\r\n\r\n else:\r\n pivot = ell[0]\r\n sublist_lessthan = []\r\n sublist_greaterthan = []\r\n \r\n for element in ell[1:]:\r\n \r\n if len(element) < len(pivot):\r\n sublist_lessthan.append(element)\r\n\r\n else:\r\n sublist_greaterthan.append(element)\r\n\r\n sorted_list = list_sort_2(sublist_lessthan) + [pivot] +list_sort_2(\r\n sublist_greaterthan)\r\n \r\n return sorted_list\r\n\r\n## Now sorted by length frequency\r\n\r\n## Subroutine to take a sorted list of lists and group its elements\r\n## by length. Length of an element of output is therefore frequency of\r\n## given length.\r\n\r\n## Subroutine to output longest consecutive sublist of elements of same size,\r\n## beginning with the first element.\r\n\r\ndef head_samesize(ell):\r\n \r\n index = 1\r\n while index < len(ell):\r\n if len(ell[index - 1]) != len(ell[index]):\r\n return ell[:index]\r\n\r\n index += 1\r\n \r\n return ell\r\n\r\n \r\ndef group_by_size(ell):\r\n\r\n ## List ell must already be sorted by size.\r\n output = []\r\n \r\n while len(ell) > 0:\r\n group = head_samesize(ell)\r\n output.append(group)\r\n ell = ell[len(group):]\r\n return output\r\n\r\ndef sort_by_frequency(ell):\r\n\r\n sorted_list = list_sort_2(ell)\r\n list_of_sizegroups = group_by_size(sorted_list)\r\n sorted_list_of_sizegroups = list_sort_2(list_of_sizegroups)\r\n sorted_by_frequency = []\r\n\r\n for group in sorted_list_of_sizegroups:\r\n sorted_by_frequency += group\r\n \r\n return sorted_by_frequency\r\n\r\n\r\n## Determine whether a given integer is prime. 
Let us sieve like\r\n## Eranthoses.\r\n\r\n## First define a function which, given a list and an index as input,\r\n## changes all entries whose index is a multiple (not zero or one) of\r\n## given index to false.\r\n\r\ndef sieve(ell, index):\r\n\r\n current_index = 2 * index\r\n \r\n while current_index < len(ell):\r\n ell[current_index] = False\r\n current_index += index\r\n\r\n return ell\r\n\r\n## Given a list sieved and a starting point, finds the first non-sieved\r\n## element following starting point.\r\n\r\ndef next_prime(ell, last_prime):\r\n\r\n index = last_prime + 1\r\n while ell[index] == False:\r\n index += 1\r\n \r\n return ell[index]\r\n\r\ndef sieve_integers(bound):\r\n\r\n integers = range((bound + 1) ** 2)\r\n integers[0], integers[1] = False, False\r\n current_prime = 2\r\n \r\n while current_prime < bound:\r\n sieve(integers, current_prime)\r\n current_prime = next_prime(integers, current_prime)\r\n \r\n return integers\r\n \r\n\r\n## Uses previous subroutines. Composites get set to 'False', so we start\r\n## setting 0, 1 to False, and success is if our desired input is an integer,\r\n## i.e. was not set to False. \r\n\r\ndef isprime(n):\r\n \r\n # We sieve by changing composites to the Boolean false because why not!\r\n bound = int(math.floor(math.sqrt(n)) + 1)\r\n integers = sieve_integers(bound)\r\n\r\n return type(integers[n]) == int\r\n\r\n\r\n## 32) Euclid's algorithm.\r\n\r\ndef gcd(a, b):\r\n if a > b:\r\n a, b = b, a\r\n\r\n if a == 0:\r\n return b\r\n\r\n else:\r\n qa = a\r\n remainder = b - a\r\n while remainder > a:\r\n qa += a\r\n remainder = b - qa\r\n\r\n return gcd(a, remainder)\r\n\r\n## 33) Determine whether two positive integers are coprime.\r\n\r\ndef coprime(a, b):\r\n return gcd(a, b) == 1\r\n\r\n## 34) Calculate Euler totient function.\r\n\r\ndef totient(m):\r\n \r\n phi = 0\r\n for n in range(1, m):\r\n if coprime(n, m):\r\n phi += 1\r\n return phi\r\n\r\n## 35) Determine the prime factors of a positive integer with\r\n## multiplicity.\r\n\r\ndef get_factors(n):\r\n\r\n if n == 1 or n == 0:\r\n return []\r\n \r\n else:\r\n bound = int(math.floor(math.sqrt(n)) + 1)\r\n primes_to_bound = [p for p in sieve_integers(bound) if type(p) == int\r\n and p < bound]\r\n\r\n for prime in primes_to_bound:\r\n if gcd(prime, n) > 1:\r\n return [prime] + get_factors(int(n / prime))\r\n return [n]\r\n \r\ndef factor(n):\r\n \r\n return encode(get_factors(n))\r\n\r\n## 36) Determine prime factors with multiplicity (accidentally done as 35.\r\n\r\n## 37) Calculate totient function using your factorization.\r\n\r\ndef phi(prime_multiplicity):\r\n\r\n prime = prime_multiplicity[1]\r\n multiplicity = prime_multiplicity[0]\r\n return (prime - 1) * prime ** (multiplicity - 1)\r\n\r\ndef totient_2(n):\r\n\r\n totient = 1\r\n factorization = factor(n)\r\n for prime_multiplicity in factorization:\r\n totient = totient * phi(prime_multiplicity)\r\n return totient\r\n\r\n## 38) Compare the two totient algorithms.\r\n\r\ndef advantage(n):\r\n\r\n good_start = time.time()\r\n totient_2(n)\r\n good_finish = time.time()\r\n good_time = good_finish - good_start\r\n\r\n bad_start = time.time()\r\n totient(n)\r\n bad_finish = time.time()\r\n bad_time = bad_finish - bad_start\r\n\r\n return 'The efficient algorithm had an advantage of %s seconds for n =%s.' % (\r\n bad_time - good_time, n)\r\n\r\n## 39) A list of prime numbers.\r\n\r\n## No desire to be efficient. 
This is extraordinarily redundant.\r\n\r\ndef prime_list(lower_bound, upper_bound):\r\n\r\n return [p for p in range(lower_bound, upper_bound + 1) if isprime(p)]\r\n\r\n## 40) Goldbach's conjecture: Find two prime numbers that sum to a given even.\r\n\r\ndef goldbach_pair(n):\r\n\r\n upper_bound = int(math.floor(n / 2) + 1)\r\n primes = prime_list(3, upper_bound)\r\n for prime in primes:\r\n if isprime(n - prime):\r\n return [prime, n - prime]\r\n return None\r\n\r\n## 41) List of Goldbach pairs: Given a range of integers, print a list of\r\n## all even numbers and their Goldbach composition.\r\n\r\ndef list_goldbachs(a, b):\r\n for n in range(a,b):\r\n if n % 2 == 0:\r\n print goldbach_pair(n)\r\n pass\r\n\r\n## Problems skip to 46?\r\n## 46) \r\n \r\n\r\n\r\n" }, { "alpha_fraction": 0.6808510422706604, "alphanum_fraction": 0.7659574747085571, "avg_line_length": 22.5, "blob_id": "3e5d5c37e294ef935d7f02e5b09a16775ff2b33a", "content_id": "360f1c8cd5382f2c31713704aefb2c2f683b7e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 47, "license_type": "no_license", "max_line_length": 34, "num_lines": 2, "path": "/README.md", "repo_name": "slamminsammya/Prolog-99", "src_encoding": "UTF-8", "text": "# Prolog-99\n99 Prolog Problems done in Python.\n" } ]
2
rico0821/miniweb
https://github.com/rico0821/miniweb
87b372af22e6b3c1c9bbee51e5171e152e108e37
5a8f7f43ea88c63dec2a6b656e79923c3fd42b89
acc16bed6faeba44948c951c82c0dcaeef4f0d6f
refs/heads/master
2020-03-08T04:09:15.296675
2018-12-08T06:25:07
2018-12-08T06:25:07
116,662,523
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5956150889396667, "alphanum_fraction": 0.6090133786201477, "avg_line_length": 26.762712478637695, "blob_id": "4d24308e514b61faa6dd016e60112a4ab1e74ac2", "content_id": "50b03894c556da1e67bc13211bde22902a045339", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1642, "license_type": "no_license", "max_line_length": 83, "num_lines": 59, "path": "/web_frame/controller/token.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom itsdangerous import URLSafeTimedSerializer, BadSignature\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db\nfrom web_frame.model.user import User\n\n\n@web_frame.route('/user/activate/<token>')\ndef activate_user(token):\n \n try:\n email = confirm_token(token)\n print(email)\n \n except BadSignature:\n Log.error(str(BadSignature))\n abort(404)\n \n user = User.query.filter_by(email=email).first()\n try: \n session['user_info'].email_verified = True\n user.email_verified = True\n db.session.commit()\n \n except Exception as e:\n db.session.rollback()\n Log.error(str(e))\n raise e\n \n activated = True\n \n return redirect(url_for('.main', user_activated=activated))\n \n#########################################################################\ndef generate_verification_token(email):\n s = URLSafeTimedSerializer('secret_key')\n return s.dumps(email, salt='security-salt')\n\ndef confirm_token(token, expiration=3600):\n s = URLSafeTimedSerializer('secret_key')\n try:\n email = s.loads(\n token,\n salt='security-salt',\n max_age=expiration\n )\n except:\n return False\n \n return email\n \ndef create_token_link(token):\n base_url = 'http://port-2000.miniweb-ricopanda0821566506.codeanyapp.com'\n token_link = base_url + url_for('.activate_user', token=token, __external=True)\n return token_link\n " }, { "alpha_fraction": 0.5834283828735352, "alphanum_fraction": 0.5910300016403198, "avg_line_length": 31.35802459716797, "blob_id": "3cbfcfe9b2fcd2b7e75c3401c4899a3dccc98d38", "content_id": "926f62e3298fb228e27eb4ff92a768a1b3bcbef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2631, "license_type": "no_license", "max_line_length": 95, "num_lines": 81, "path": "/web_frame/__init__.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nfrom flask import Flask, render_template, request, url_for\n\n\ndef print_settings(config):\n print('-----------------------------------------------')\n print('SETTINGS')\n print('-----------------------------------------------')\n for key, value in config:\n print('%s=%s' % (key, value))\n print('-----------------------------------------------')\n\n############################################################################\ndef not_found(error):\n return render_template('404.html'), 404\n\ndef server_error(error):\n err_msg = str(error)\n return render_template('500.html', err_msg=err_msg), 500\n\n############################################################################\ndef create_app(config_filepath='resource/config.cfg'):\n \n web_app = Flask(__name__)\n \n #CONFIG\n from web_frame.web_config import webConfig\n web_app.config.from_object(webConfig)\n web_app.config.from_pyfile(config_filepath, silent=True)\n print_settings(web_app.config.items())\n \n 
#Initialise Log\n from web_frame.web_logger import Log\n log_filepath = os.path.join(web_app.root_path,\n web_app.config['LOG_FILE_PATH'])\n Log.init(log_filepath=log_filepath)\n \n #Load DB, Migrate\n from flask_migrate import Migrate\n from web_frame.model import db\n db.init_app(web_app)\n migrate = Migrate(web_app, db)\n \n #Model\n from web_frame.model.user import User\n with web_app.app_context():\n db.create_all() # Create tables\n \n #Admin\n from flask_admin import Admin\n from web_frame.admin import AdminLogin, AuthModelView\n admin = Admin(web_app, index_view=AdminLogin(), name='miniweb', template_mode='bootstrap3')\n admin.add_view(AuthModelView(User, db.session))\n \n #Mail\n from web_frame.web_mail import mail\n mail.init_app(web_app)\n \n #Load view functions\n from web_frame.controller import general\n from web_frame.controller import login\n from web_frame.controller import register_user\n from web_frame.controller import main\n from web_frame.controller import email\n from web_frame.controller import recover\n \n #Blueprint\n from web_frame.web_blueprint import web_frame \n web_app.register_blueprint(web_frame)\n \n #SessionInterface\n from web_frame.cache_session import SimpleCacheSessionInterface\n web_app.session_interface = SimpleCacheSessionInterface()\n \n #Common error handlers\n web_app.register_error_handler(404, not_found)\n web_app.register_error_handler(500, server_error)\n \n return web_app\n \n\n " }, { "alpha_fraction": 0.6387154459953308, "alphanum_fraction": 0.6409456133842468, "avg_line_length": 29.164382934570312, "blob_id": "1e1155798266697859a253a19c77704015ae9d03", "content_id": "18682424a31a2d8eb0056d0d927672ef761d0e18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2242, "license_type": "no_license", "max_line_length": 98, "num_lines": 73, "path": "/web_frame/admin.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import current_app, redirect, request, session, url_for, make_response, render_template\nfrom werkzeug import generate_password_hash\nfrom wtforms import TextField\n\nfrom web_frame.model import db\nfrom web_frame.web_logger import Log \nfrom web_frame.model.user import User\nfrom web_frame.controller.login import login_required\n\nfrom flask_admin import expose, Admin, AdminIndexView, BaseView, babel \nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin import helpers as h\n\n\ndef render(self, template, **kwargs):\n kwargs['admin_view'] = self\n kwargs['admin_base_template'] = self.admin.base_template\n\n kwargs['_gettext'] = babel.gettext\n kwargs['_ngettext'] = babel.ngettext\n kwargs['h'] = h\n\n kwargs['get_url'] = self.get_url\n kwargs['config'] = current_app.config\n kwargs.update(self._template_args)\n\n response = make_response(render_template(template, **kwargs), 200)\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Pragma'] = 'no-cache'\n response.headers['Expires'] = '0'\n return response\n\nBaseView.render = render\n\nclass AdminAuth:\n \n def authorised(self):\n \n access = False\n username = session['user_info'].username\n current_user = User.query.filter_by(username=username).first()\n if current_user.admin:\n access = True\n return access\n \n \nclass AdminLogin(AdminIndexView, AdminAuth):\n @expose('/')\n @login_required\n def index(self):\n \n if self.authorised():\n print(self.is_accessible())\n return super(AdminLogin, self).index()\n 
else:\n return redirect(url_for('web_frame.main'))\n\n\n \nclass AuthModelView(ModelView, AdminAuth):\n @expose('/')\n @login_required\n def index_view(self):\n \n if self.authorised():\n return super(AuthModelView, self).index_view()\n else:\n return redirect(url_for('web_frame.main'))\n def on_model_change(self, form, User, is_created=False):\n if hasattr(form, \"password\"):\n User.password = generate_password_hash(form.password.data)\n \n\n \n \n \n" }, { "alpha_fraction": 0.6129032373428345, "alphanum_fraction": 0.6241233944892883, "avg_line_length": 25.22222137451172, "blob_id": "46c7822455b5d52e4d924549b69976fd3d303b3a", "content_id": "a308be089cf46c25a1dc87d86a26665bb37dc04a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "no_license", "max_line_length": 96, "num_lines": 27, "path": "/web_frame/web_config.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\n\nclass webConfig:\n \n # SQLAlchemy\n SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir,'resource/database', 'web.db')\n SQLALCHEMY_TRACK_MODIFICATIONS = False\n SQLALCHEMY_ECHO = False\n TEMP_FOLDER= 'resource/temp/'\n # Session\n PERMANENT_SESSION_LIFETIME = 60 * 60\n SESSION_COOKIE_NAME= 'web_session'\n # Log\n LOG_LEVEL= 'debug'\n LOG_FILE_PATH= 'resource/log/web.log'\n # Mail\n MAIL_SERVER = 'smtp.gmail.com'\n MAIL_USE_SSL = True\n MAIL_PORT = 465\n MAIL_DEFAULT_SENDER = '[email protected]'\n MAIL_USERNAME = '[email protected]'\n MAIL_PASSWORD = 'example'\n TESTING = True\n\n " }, { "alpha_fraction": 0.5239595174789429, "alphanum_fraction": 0.5281047821044922, "avg_line_length": 33.02824783325195, "blob_id": "ef28c579e6d13bfd909056110875fcfe23fd5c4d", "content_id": "585c4daffc059e3c9912113417f881629e873d8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6031, "license_type": "no_license", "max_line_length": 86, "num_lines": 177, "path": "/web_frame/controller/register_user.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom flask import abort, redirect, render_template, request, session, url_for, jsonify\nfrom werkzeug import generate_password_hash\nfrom wtforms import Form, TextField, PasswordField, HiddenField, validators\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db\nfrom web_frame.model.user import User\nfrom web_frame.controller.general import get_user, login_required\n \n@web_frame.route('/user/regist')\ndef register_user_form():\n \n form = RegisterForm(request.form)\n \n return render_template('regist.html', form=form)\n\n@web_frame.route('/user/regist', methods=['POST'])\ndef register_user():\n \n if 'user_info' in session:\n return redirect(url_for('.main'))\n \n form = RegisterForm(request.form)\n \n if form.validate():\n \n username = form.username.data\n email = form.email.data\n password = form.password.data\n \n try: \n user = User(username=username,\n email=email, \n password=generate_password_hash(password))\n db.session.add(user)\n db.session.commit()\n \n Log.debug(user)\n \n except Exception as e:\n error = \"DB error occurs : \" + str(e)\n Log.error(error)\n db.session.rollback()\n raise e\n \n else:\n return redirect(url_for('.login', regist_username=username))\n else:\n return 
render_template('regist.html', form=form)\n \n@web_frame.route('/user/update_info/<username>')\n@login_required\ndef update_user_form(username):\n\n if username != session['user_info'].username:\n abort(404)\n \n current_user = get_user(username)\n form = UpdateForm(request.form, current_user)\n \n return render_template('regist.html', user=current_user, form=form)\n\n@web_frame.route('/user/update_info/<username>', methods=['POST'])\n@login_required\ndef update_user(username):\n \n if username != session['user_info'].username:\n abort(404)\n \n current_user = get_user(username)\n form = UpdateForm(request.form)\n \n if form.validate():\n email = form.email.data\n password = form.password.data\n \n try: \n current_user.email = email\n current_user.password = generate_password_hash(password)\n db.session.commit()\n \n except Exception as e:\n db.session.rollback()\n Log.error(str(e))\n raise e\n \n else:\n session['user_info'].email = current_user.email\n session['user_info'].password = current_user.password\n session['user_info'].password_confirm = current_user.password_confirm\n \n return redirect(url_for('.login', update_username=username))\n else:\n return render_template('regist.html', user=current_user, form=form)\n\n@web_frame.route('/user/unregist')\n@login_required\ndef unregist():\n user_id = session['user_info'].id\n \n try:\n user = User.query.filter_by(id=user_id).first()\n Log.info(\"unregist:\"+user.username)\n \n if user.id == user_id:\n db.session.delete(user)\n db.session.commit()\n \n else:\n Log.error(\"Following user does not exist: %d\", user_id)\n raise Exception\n \n except Exception as e:\n Log.error(str(e))\n db.session.rollback()\n raise error\n \n return redirect(url_for('.logout'))\n\n@web_frame.route('/user/check_name', methods=['POST'])\ndef check_name():\n \n username = request.json['username']\n \n if get_user(username):\n return jsonify(result=False)\n else:\n return jsonify(result=True)\n \n############################################################################ \nclass UpdateForm(Form):\n \n username = TextField('Username')\n email = TextField('Email',[validators.Required('Enter email.'),\n validators.Email(message='Not a valid e-mail!')])\n password = PasswordField('New Password',\n [validators.Required('Enter password.'),\n validators.Length(\n min=4,\n max=50,\n message='Must be between 4 and 50 characters.'),\n validators.EqualTo('password_confirm',\n message='Password mismatch!')])\n password_confirm = PasswordField('Confirm Password')\n \n username_check = HiddenField('Username Check', \n [validators.Required('Check name availability.')])\n \n############################################################################\nclass RegisterForm(Form):\n \n username = TextField('Username',\n [validators.Required('Enter username.'),\n validators.Length(\n min=4,\n max=50,\n message='Must be between 4 and 50 characters.')])\n email = TextField('Email',[validators.Required('Enter email.'),\n validators.Email(message='Not a valid e-mail!')])\n \n password = PasswordField('New Password',\n [validators.Required('Enter password.'),\n validators.Length(\n min=4,\n max=50,\n message='Must be between 4 and 50 characters.'),\n validators.EqualTo('password_confirm',\n message='Password mismatch!')])\n password_confirm = PasswordField('Confirm Password')\n \n username_check = HiddenField('Username Check', \n [validators.Required('Check name availability.')])\n " }, { "alpha_fraction": 0.5776424407958984, "alphanum_fraction": 0.5780774354934692, "avg_line_length": 
33.66666793823242, "blob_id": "279664d0e69dfa42c076591b5e129f8b67398af3", "content_id": "31b0fd32399b700ac1b788382451b5fefca389c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2299, "license_type": "no_license", "max_line_length": 80, "num_lines": 66, "path": "/web_frame/controller/recover.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import redirect, render_template, request, session, url_for\nfrom werkzeug import generate_password_hash\nfrom wtforms import Form, TextField, validators\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db\nfrom web_frame.model.user import User\nfrom web_frame.controller.general import password_generator\n\n\n@web_frame.route('/user/recover_account')\ndef recover_account_form():\n \n form = RecoverForm(request.form)\n recover_error = request.args.get('error','')\n recover_mail = request.args.get('recover_mail','')\n \n return render_template('recover.html', \n form=form, \n error=recover_error,\n recover_mail=recover_mail)\n \n@web_frame.route('/user/recover_account', methods=['POST'])\ndef recover_account():\n \n form = RecoverForm(request.form)\n recover_error = None\n if form.validate():\n \n email = form.email.data\n new_password = password_generator()\n \n try:\n current_user = User.query.filter_by(email=email).first()\n except Exception as e:\n Log.error(str(e))\n \n if not current_user:\n recover_error = 'No matching account!'\n return redirect(url_for('.recover_account', error=recover_error))\n \n if not current_user.email_verified:\n recover_error = 'Account not verified yet!'\n return redirect(url_for('.recover_account', error=recover_error))\n \n try:\n current_user.password = generate_password_hash(new_password)\n db.session.commit()\n return redirect(url_for('.send_recover_email', email=email))\n \n except Exception as e:\n db.session.rollback()\n Log.error(str(e))\n recover_error = 'Error!'\n raise e\n \n return render_template('recover.html', form=form, error=recover_error)\n \n######################################################################\nclass RecoverForm(Form):\n \n email = TextField('Email',[validators.Required('Enter email.'),\n validators.Email(message='Not a valid e-mail!')])\n \n\n \n" }, { "alpha_fraction": 0.505433201789856, "alphanum_fraction": 0.5092511177062988, "avg_line_length": 31.399999618530273, "blob_id": "baa445461025e9fe228ba7dea03e346a41789fef", "content_id": "411e66c136231a8e3b0ff20a5349012a67a1614f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3405, "license_type": "no_license", "max_line_length": 76, "num_lines": 105, "path": "/web_frame/controller/login.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import redirect, render_template, request, session, url_for\nfrom werkzeug import check_password_hash\nfrom wtforms import Form, TextField, PasswordField, HiddenField, validators\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db \nfrom web_frame.model.user import User\nfrom web_frame.controller.general import login_required, get_user\n\n \n@web_frame.route('/')\n@login_required\ndef index():\n return redirect(url_for('.main'))\n\n@web_frame.route('/user/login')\ndef login_form():\n \n if 'user_info' in session:\n return 
redirect(url_for('.main'))\n \n next_url = request.args.get('next','')\n regist_username = request.args.get('regist_username','')\n update_username = request.args.get('update_username','')\n recover_mail = request.args.get('recover_mail','')\n Log.info('(%s)next_url is %s' % (request.method, next_url))\n \n form = LoginForm(request.form)\n \n return render_template('login.html',\n next_url=next_url,\n form=form,\n regist_username=regist_username,\n update_username=update_username)\n\n@web_frame.route('/user/login', methods=['POST'])\ndef login():\n\n form = LoginForm(request.form)\n next_url = form.next_url.data\n login_error = None\n \n if form.validate():\n session.permanent = True\n \n username = form.username.data\n password = form.password.data\n next_url = form.next_url.data\n \n Log.info('(%s)next_url is %s' % (request.method, next_url))\n \n user = get_user(username)\n \n if user:\n if not check_password_hash(user.password, password):\n login_error = 'Invalid password'\n \n else:\n session['user_info'] = user\n Log.info('%s has logged in' % user)\n if user.admin:\n return redirect(url_for('admin.index'))\n elif next_url != '': \n return redirect(next_url)\n else:\n return redirect(url_for('.main'))\n \n else: \n login_error = 'User does not exist'\n \n return render_template('login.html',\n next_url=next_url,\n form=form,\n error=login_error)\n \n@web_frame.route('/logout')\n@login_required\ndef logout():\n \n Log.info('%s has logged out' % session['user_info'])\n session.clear()\n \n return redirect(url_for('.index'))\n\n############################################################################\nclass LoginForm(Form):\n \n username = TextField('Username',\n [validators.Required('Enter user name'),\n validators.Length(\n min=4,\n max=20,\n message='4~20 characters')])\n \n password = PasswordField('New Password',\n [validators.Required('Enter password'),\n validators.Length(\n min=4,\n max=20,\n message='4~20 characters')])\n \n next_url = HiddenField('Next URL')\n " }, { "alpha_fraction": 0.6955128312110901, "alphanum_fraction": 0.6987179517745972, "avg_line_length": 30.299999237060547, "blob_id": "4c7756d8ae95cc0a7b923c0d2551e4fbbfbfd0b5", "content_id": "bea360b852ad18fbf430dffcd1cb271fb3789b20", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 103, "num_lines": 10, "path": "/web_frame/web_blueprint.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import Blueprint\nfrom web_frame.web_logger import Log\n\n\nweb_frame = Blueprint('web_frame', __name__, template_folder='../templates', static_folder='../static')\n\nLog.info('static folder: %s' % web_frame.static_folder)\nLog.info('template folder: %s' % web_frame.template_folder)" }, { "alpha_fraction": 0.6115702390670776, "alphanum_fraction": 0.6198347210884094, "avg_line_length": 12.44444465637207, "blob_id": "3a613211a56b3a6c9d377af8420faf4f534ba0ab", "content_id": "1fb3d359dddc7cb33370a9b204ffa61ae23e13a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 121, "license_type": "no_license", "max_line_length": 39, "num_lines": 9, "path": "/web_frame/model/__init__.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask_sqlalchemy import SQLAlchemy\n\n\ndb = SQLAlchemy()\nBase = db.Model\n\n__all__ = ['user']\n" }, { "alpha_fraction": 
0.5756784677505493, "alphanum_fraction": 0.5793319344520569, "avg_line_length": 27.53731346130371, "blob_id": "38d003fb68b26595ebefd8b06e5c5564e28bb433", "content_id": "61169dd1e86ce2ba57a45aefa1992fd8c0df1386", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1916, "license_type": "no_license", "max_line_length": 100, "num_lines": 67, "path": "/web_frame/controller/general.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport string\nfrom functools import wraps\nfrom random import *\n\nfrom flask import current_app, redirect, request, session, url_for\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.model import db\nfrom web_frame.model.user import User\nfrom web_frame.web_logger import Log\n\n@web_frame.after_request\ndef add_header(r):\n \"\"\"Disable page caching to avoid back-button problems\"\"\"\n \n r.headers.add('Cache-Control', 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0')\n return r\n \n@web_frame.teardown_request\ndef close_db_session(exception=None):\n \n try:\n db.session.remove()\n except Exception as e:\n Log.error(str(e))\n\n\n########################################################################\ndef login_required(f):\n \n @wraps(f)\n def decorated_function(*args, **kwargs):\n \"\"\"Check whether logged in\"\"\"\n try:\n session_key = request.cookies.get(\n current_app.config['SESSION_COOKIE_NAME'])\n is_login = False\n if session.sid == session_key and session.__contains__('user_info'):\n is_login = True\n \n if not is_login:\n return redirect(url_for('web_frame.login', next=request.url))\n \n return f(*args, **kwargs)\n \n except Exception as e:\n Log.error('Web error: %s' % str(e))\n raise e\n \n return decorated_function\n\ndef get_user(username):\n try:\n current_user = User.query.filter_by(username=username).first()\n Log.debug(current_user)\n return current_user\n \n except Exception as e:\n Log.error(str(e))\n raise e\n \ndef password_generator():\n characters = string.ascii_letters + string.digits\n password = ''.join(choice(characters) for x in range(randint(20,30)))\n return password\n " }, { "alpha_fraction": 0.6692456603050232, "alphanum_fraction": 0.6697292327880859, "avg_line_length": 31.234375, "blob_id": "d132bf78d0bb6515625eb6c9b7a1fe16cbac5581", "content_id": "52e9f0533117a84737cf3e1ec70943378fba4754", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2068, "license_type": "no_license", "max_line_length": 92, "num_lines": 64, "path": "/web_frame/controller/email.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom flask_mail import Message\n\nfrom web_frame.web_mail import mail\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db\nfrom web_frame.model.user import User\nfrom web_frame.controller.general import get_user, login_required\nfrom web_frame.controller.token import generate_verification_token, create_token_link\n\n \n@web_frame.route('/user/send_recover_email')\ndef send_recover_email():\n \n email = request.args.get('email','')\n recover_account = User.query.filter_by(email=email).first()\n \n username = recover_account.username\n password = recover_account.password\n \n msg = Message()\n msg.html = render_template('email/recover.html', 
username=username, password=password)\n msg.recipients = [email]\n \n try:\n mail.send(msg)\n Log.info('Recovery mail sent to %s' % (username))\n recover_mail = 'E-mail successfully sent!'\n return redirect(url_for('.recover_account', recover_mail=recover_mail))\n except Exception as e:\n Log.error(str(e))\n raise e\n \n return redirect(url_for('.recover_account'))\n\n@web_frame.route('/user/send_token_email')\n@login_required\ndef send_token_email():\n \n current_user = get_user(session['user_info'].username)\n if current_user.email_verified:\n return redirect(url_for('.main'))\n \n username = current_user.username\n email = current_user.email\n \n token = generate_verification_token(email)\n confirm_url = create_token_link(token)\n msg = Message(\"Account verification\")\n msg.html = render_template('email/activation.html', confirm_url=confirm_url)\n msg.recipients = [email]\n \n try:\n mail.send(msg)\n Log.info('Verification mail sent to %s' % (username))\n return redirect(url_for('.main', email_sent=True))\n except Exception as e:\n Log.error(str(e))\n raise e\n \n return redirect(url_for('.main', email_error=True))\n \n" }, { "alpha_fraction": 0.6451301574707031, "alphanum_fraction": 0.6460945010185242, "avg_line_length": 34.7931022644043, "blob_id": "9f74150f278ae472234b9ac7552f1bf6324f7f48", "content_id": "aff832faef6688c9b4315ab09e3932aa756a9c28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1037, "license_type": "no_license", "max_line_length": 83, "num_lines": 29, "path": "/web_frame/controller/main.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom flask import current_app, redirect, render_template, request, session, url_for\n\nfrom web_frame.web_blueprint import web_frame\nfrom web_frame.web_logger import Log\nfrom web_frame.model import db\nfrom web_frame.model.user import User\nfrom web_frame.controller.general import login_required, get_user\n\n\n@web_frame.route('/main')\n@login_required\ndef main():\n \n current_user = get_user(session['user_info'].username)\n username = current_user.username\n email_verified = current_user.email_verified\n email_sent = request.args.get('email_sent', '')\n email_error = request.args.get('email_error', '')\n user_activated = request.args.get('user_activated', '')\n \n if current_user.admin:\n return redirect(url_for('admin.index'))\n return render_template('main.html',\n email_verified=email_verified,\n email_sent=email_sent,\n email_error=email_error,\n user_activated=user_activated)" }, { "alpha_fraction": 0.5641509294509888, "alphanum_fraction": 0.5641509294509888, "avg_line_length": 31.15151596069336, "blob_id": "2904679eaa1fa923aeb957b78e51c83504e2bbdb", "content_id": "4021e900de4fa4c3e67d0d95d9a60c815038181e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1062, "license_type": "no_license", "max_line_length": 141, "num_lines": 33, "path": "/web_frame/templates/main.html", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "{% extends \"layout.html\" %}\n{% block title %}Main{% endblock %}\n{% block head %}\n{{ super() }}\n{% endblock %}\n{% block content %}\n<div class=\"container\">\n {% if email_sent %}\n <div id=\"email_sent\" class=\"alert alert-info\">\n <a class=\"close\" data-dismiss=\"alert\" href=\"#\">×</a>\n Verification email sent.\n </div>\n {% endif %}\n {% if email_error %}\n <div class=\"alert alert-danger\">\n <i class=\"fa 
fa-exclamation-circle\" aria-hidden=\"true\"></i>\n Email error occured. \n </div> \n {% endif %}\n {% if user_activated %}\n <div class=\"alert alert-info\">\n <a class=\"close\" data-dismiss=\"alert\" href=\"#\">×</a>\n Your account has been activated!\n </div>\n {% endif %}\n {% if not email_verified %}\n <div class=\"alert alert-danger\">\n <i class=\"fa fa-exclamation-circle\" aria-hidden=\"true\"></i>\n You have not verified your e-mail. <a id=\"send_email\" href=\"{{ url_for('web_frame.send_token_email') }}\">Send e-mail verification</a>\n </div>\n {% endif %}\n</div>\n{% endblock %}" }, { "alpha_fraction": 0.5277777910232544, "alphanum_fraction": 0.5370370149612427, "avg_line_length": 35.33333206176758, "blob_id": "69687367c51937eebcb622e0d0ee1bd637b6f725", "content_id": "19cb021f25f5cd3d90230c6ab9610a0f16e9836b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 108, "license_type": "no_license", "max_line_length": 83, "num_lines": 3, "path": "/web_frame/controller/__init__.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__all__ = ['general','login', 'register_user', 'main', 'email', 'token', 'recover']" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 13.142857551574707, "blob_id": "c42dc1a3cd60d13af9d3a7a01b9e49f0b14bb1a3", "content_id": "595519e641c09ee52c56d5e4baf6bf6be8f2e074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "no_license", "max_line_length": 43, "num_lines": 7, "path": "/README.md", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# miniweb\n\nPython Flask mini-web framework\n\nFeatures:\n\nRegister user, login, e-mail actions, admin \n" }, { "alpha_fraction": 0.5866084694862366, "alphanum_fraction": 0.5982532501220703, "avg_line_length": 33.400001525878906, "blob_id": "c2aeff6da8870107436d353627f5ed9024f74130", "content_id": "6c5917a642645026af6d15f120918166655304b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "no_license", "max_line_length": 78, "num_lines": 20, "path": "/web_frame/model/user.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom web_frame.model import Base, db\n\n\nclass User(Base):\n \n __tablename__ = 'users'\n \n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(50), unique=True)\n email = db.Column(db.String(50), unique=False)\n email_verified = db.Column(db.Boolean, nullable=False, default=False)\n admin = db.Column(db.Boolean, nullable=False, default=False)\n password = db.Column(db.String(100), unique=False)\n date_created = db.Column(db.DateTime, default=db.func.current_timestamp())\n \n ################################################\n def __repr__(self):\n return '<User %r %r>' % (self.username, self.email)" }, { "alpha_fraction": 0.6107594966888428, "alphanum_fraction": 0.6170886158943176, "avg_line_length": 38.5, "blob_id": "e4420da3aae1f1f03af6df948ada52653c5436dc", "content_id": "7b414ae6bda2bbbb15cd41a91479875edb7e0f69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 316, "license_type": "no_license", "max_line_length": 89, "num_lines": 8, "path": "/web_frame/templates/admin/master.html", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": 
"{% extends 'admin/base.html' %}\n{% block head %}\n<link rel=\"shortcut icon\" href=\"{{ url_for('static',filename='img/favicon.ico') }}\" />\n{% endblock %}\n{% block title %}Admin{% endblock %}\n{% block menu_links %}\n<a style=\"float:right; margin: 15px;\" href=\"{{ url_for('web_frame.logout') }}\">Logout</a>\n{% endblock %}\n" }, { "alpha_fraction": 0.6235294342041016, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 18, "blob_id": "cb17824e4a74efbc00116563a4872d0e7a3936e9", "content_id": "6f9c889eb7bdcc34a6037cc5b78f59f315a7370f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 54, "num_lines": 9, "path": "/run_server.py", "repo_name": "rico0821/miniweb", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom web_frame import create_app\n\n\napplication = create_app()\n\nprint('Starting server...')\napplication.run(host='0.0.0.0', port=2000, debug=True)" } ]
18
makariim/Pipelined-Processor
https://github.com/makariim/Pipelined-Processor
b41c1a9bcd48bf8018993413609aa1aa1fa06cc6
c3cb7ba5f5fa97afaa92618b303b66e085307446
ee67b548c498ebdef2a00c24a1629cd57ad72b75
refs/heads/master
2020-05-18T00:16:53.255662
2019-05-23T08:22:02
2019-05-23T08:22:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4091040790081024, "alphanum_fraction": 0.4760800302028656, "avg_line_length": 24.738805770874023, "blob_id": "e5264dfc2df5b1a6746f9814f801d2356e5e3126", "content_id": "7f9828f0fb2d7300eb73a00976e5132e84879ddd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3449, "license_type": "no_license", "max_line_length": 105, "num_lines": 134, "path": "/Assembler.py", "repo_name": "makariim/Pipelined-Processor", "src_encoding": "UTF-8", "text": "\nTwoOp = ['SHR','SHL','LDM','ADD','SUB','AND','OR','MOV','LDD','STD']\nOneOp = ['IN','OUT','NOT','INC','DEC','PUSH','POP','CALL','JZ','JN','JC','JMP']\n\n#Commands = ['SHR','SHL','ADD','SUB','AND','OR','MOV','LDM','LDD','STD',,'RET','RTI','NOP','CLRC','SETC']\nOpCode = {\n 'SHR':'111001',\n 'SHL':'111101',\n 'ADD':'110000',\n 'SUB':'110010',\n 'AND':'110100',\n 'OR':'110110',\n 'NOT':'100000',\n 'INC':'100001',\n 'DEC':'100010',\n 'PUSH':'100011',\n 'POP':'100100',\n 'CALL':'100101',\n 'RET':'100110',\n 'RTI':'100111',\n 'NOP':'00000',\n 'MOV':'00001',\n 'CLRC':'00010',\n 'SETC':'00011',\n 'IN':'001000',\n 'OUT':'001001',\n 'LDM':'001010',\n 'LDD':'001011' ,\n 'STD':'001100',\n 'JZ':'01000',\n 'JN':'01001',\n 'JC':'01010',\n 'JMP':'01011',\n }\n\ndef GetRegIdx(IDX):\n IDX = str(\"{0:b}\".format(int(IDX)))\n IDX = IDX.rjust(3,'0')\n return IDX\n\nfile = open('input.txt','r')\n#Out = open('Output.txt','w')\nOut = open('Output.mem','w')\nOut.write('// memory data file (do not edit the following line - required for mem load use)\\n')\nOut.write('// instance=/Project/my_ram/ram\\n')\nOut.write('// format=mti addressradix=d dataradix=b version=1.0 wordsperline=1\\n')\n\nlines = file.readlines()\ni = 0\nfor L in lines:\n\n Instruction = \"\"\n Operand1 = \"\"\n Operand2 = \"\"\n IR = \"\"\n Value = \"\"\n L = L.replace(',',' ')\n L = L.replace(';',' ')\n L = L.replace('\\n','')\n Words = L.split()\n if(len(Words) == 0):\n continue\n\n Instruction = Words[0].upper()\n if Instruction == \".ORG\":\n MEMlocation = int(Words[1])\n while i != MEMlocation:\n Out.write(str(i)+': ' + \"0\" * 16 + '\\n')\n i+=1\n continue\n\n try: #Wrtiting value in the memory.\n Val = str(\"{0:b}\".format(int(Words[0])))\n Val = Val.rjust(16,'0')\n Out.write(str(i)+': ' +Val+'\\n')\n i+=1\n continue\n except:\n pass\n\n if(not OpCode.__contains__(Instruction)):\n continue\n \n \n IR+=OpCode[Instruction]\n if TwoOp.count(Instruction) > 0:\n Operand1 = Words[1].upper()\n Operand2 = Words[2].upper()\n\n if(TwoOp.index(Instruction) < 2): #SHL or SHR\n IMM = str(\"{0:b}\".format(int(Operand2)))\n IMM = IMM.rjust(7,'0')\n IR += IMM\n IR += GetRegIdx(Operand1[1])\n\n elif(TwoOp.index(Instruction) == 2): #LDM\n #Ask The bitches.\n IR = IR.ljust(10,'0')\n IR += GetRegIdx(Operand1[1])\n IR = IR.ljust(16,'0')\n Value = str(\"{0:b}\".format(int(Operand2)))\n Value = Value.rjust(16,'0')\n \n else:\n IR = IR.ljust(10,'0')\n IR += GetRegIdx(Operand2[1])\n IR += GetRegIdx(Operand1[1])\n\n elif OneOp.count(Instruction) > 0:\n Operand1 = Words[1].upper()\n if(OneOp.index(Instruction) <= 1):\n IR = IR.ljust(10,'0')\n IR += GetRegIdx(Operand1[1])\n IR += '000'\n else:\n IR = IR.ljust(13,'0')\n IR += GetRegIdx(Operand1[1])\n else:\n IR = IR.ljust(16,'0') \n\n #Write in the output file.\n Out.write(str(i)+': ')\n i += 1\n Out.write(IR)\n print(IR)\n Out.write('\\n')\n if(Value != \"\"):\n Out.write(str(i)+': ')\n i += 1\n Out.write(Value)\n Out.write('\\n')\n #-------------------------\n\nfile.close()\nOut.close()" } ]
1
alexgrigoras/youtube_consumer_perception
https://github.com/alexgrigoras/youtube_consumer_perception
74cc240a133acc2cbd03d3ec06aab58ca53705d9
63fd562b2a9a07808c4ab09ebe9900f2e93605d1
7a56ac22e67df3a4ec207490bee5038a783c4700
refs/heads/master
2022-12-30T00:32:36.995883
2020-10-22T15:48:13
2020-10-22T15:48:13
254,565,750
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5815956592559814, "alphanum_fraction": 0.5956482291221619, "avg_line_length": 35.012245178222656, "blob_id": "d50464c8f0e0d55a2313535bb66cefd16c69cdf5", "content_id": "bbe75b8cf0826bb69fc3b3f2ff06050a33f9f32d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8833, "license_type": "permissive", "max_line_length": 120, "num_lines": 245, "path": "/youtube_sentiment_analysis/modules/display.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Display data module\n @alexandru_grigoras\n\"\"\"\n# Libraries\nimport operator\n\nimport mplcursors\nimport numpy as np\nimport scipy\nimport seaborn as sns\nfrom PyQt5.QtWidgets import QSizePolicy\nfrom matplotlib import ticker\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom wordcloud import WordCloud\n\n# Constants\n__all__ = ['DisplayData']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\n\nclass DisplayData(FigureCanvas):\n \"\"\"Analyse the data and determine sentiment and word frequency\"\"\"\n\n def __init__(self, parent=None, width=5, height=4, dpi=100):\n \"\"\"Class constructor\"\"\"\n self.fig = Figure(figsize=(width, height), dpi=dpi)\n self.ax = self.fig.add_subplot(111)\n\n FigureCanvas.__init__(self, self.fig)\n self.setParent(parent)\n\n self.__parent = parent\n self.__width = width\n self.__height = height\n\n FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)\n FigureCanvas.updateGeometry(self)\n\n self.__fig_flag = False\n\n def plot_classifiers(self, x_values, y_values, size, color, x_names, y_names,\n comments, videos, author, comm_time):\n \"\"\"Plot the results of the classifiers\"\"\"\n\n # center lines\n self.ax.spines['left'].set_color('none')\n self.ax.spines['right'].set_position('center')\n self.ax.spines['bottom'].set_color('none')\n self.ax.spines['top'].set_position('center')\n self.ax.spines['right'].set_color('gray')\n self.ax.spines['top'].set_color('gray')\n self.ax.spines['left'].set_smart_bounds(True)\n self.ax.spines['bottom'].set_smart_bounds(True)\n self.ax.xaxis.set_ticks_position('bottom')\n self.ax.yaxis.set_ticks_position('left')\n\n # map the size of points to [10, 200]\n mapped_size = []\n for x in size:\n mapped_size.append(self.__map(x, min(size), max(size), 10, 200))\n\n # scatter points\n sc = self.ax.scatter(x_values, y_values, c=color, s=mapped_size)\n\n # labels and limits\n self.ax.set_xlabel(x_names)\n self.ax.set_ylabel(y_names)\n self.ax.set_xlim([-1, 1])\n self.ax.set_ylim([0, 10])\n self.ax.xaxis.set_ticks(np.arange(-1, 1, 0.25))\n self.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n self.ax.yaxis.set_ticks(np.arange(0, 10, 2))\n self.ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n self.fig.suptitle(\"Rezultatele algoritmilor de clasificare\")\n\n # Colorbar with label\n divider = make_axes_locatable(self.ax)\n cax = divider.append_axes('right', size='3%', pad=0.05)\n cb = self.fig.colorbar(sc, cax=cax, orientation='vertical')\n cb.set_label('Gradul de încredere')\n\n c2 = mplcursors.cursor(self.ax)\n\n @c2.connect(\"add\")\n def _(sel):\n sel.annotation.get_bbox_patch().set(fc=\"white\", alpha=0.9)\n 
sel.annotation.arrow_patch.set(arrowstyle=\"simple\", fc=\"black\", alpha=0.9)\n sel.annotation.set_text(\"Video: \" + videos[sel.target.index] + \"\\n\" +\n \"Text: \" + comments[sel.target.index] + \"\\n\" +\n \"Likes: \" + str(int(size[sel.target.index])) + \"\\n\" +\n \"Author: \" + author[sel.target.index] + \"\\n\" +\n \"Time: \" + comm_time[sel.target.index])\n sel.annotation.draggable(True)\n\n self.__fig_flag = True\n\n self.draw()\n\n def plot_heatmap(self, x_values, y_values, x_names, y_names):\n \"\"\"Plot the heatmap for classifiers result\"\"\"\n\n # Define numbers of generated data points and bins per axis.\n n_bins = 8\n\n # Construct 2D histogram from data using the 'plasma' colormap\n h, xedges, xedges, image = self.ax.hist2d(x_values, y_values, bins=n_bins, cmap='jet', range=[[-1, 1], [0, 10]])\n\n # Plot a colorbar with label.\n cb = self.fig.colorbar(image)\n cb.set_label('Număr de recenzii')\n\n # Center lines and limits\n self.ax.spines['left'].set_color('none')\n self.ax.spines['right'].set_position('center')\n self.ax.spines['bottom'].set_color('none')\n self.ax.spines['top'].set_position('center')\n self.ax.spines['right'].set_color('gray')\n self.ax.spines['top'].set_color('gray')\n self.ax.spines['left'].set_smart_bounds(True)\n self.ax.spines['bottom'].set_smart_bounds(True)\n self.ax.xaxis.set_ticks(np.arange(-1, 1, 0.25))\n self.ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n self.ax.yaxis.set_ticks(np.arange(0, 10, 2))\n self.ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1f'))\n\n # Title and labels\n self.fig.suptitle('Harta termografică a rezultatelor clasificării')\n self.ax.set_xlabel(x_names)\n self.ax.set_ylabel(y_names)\n\n self.__fig_flag = True\n\n self.draw()\n\n def plot_word_frequency(self, items):\n \"\"\"Plot the word frequency\"\"\"\n\n # sort the above items\n sorted_tuples = sorted(items, key=operator.itemgetter(1), reverse=True)\n a = [i[0] for i in sorted_tuples[0:20]]\n b = [i[1] for i in sorted_tuples[0:20]]\n x = scipy.arange(len(b))\n y = scipy.array(b)\n\n color_space = sns.color_palette('viridis', len(x))\n self.ax.bar(x, y, align='center', color=color_space)\n self.ax.set_xticks(x)\n self.ax.set_xticklabels(a, rotation=45)\n self.ax.set_xlabel('Cuvinte')\n self.ax.set_ylabel('Frecvența')\n self.fig.suptitle('Frecvența cuvintelor din recenzii')\n\n self.__fig_flag = True\n\n self.draw()\n\n def plot_wordcloud(self, tokens):\n \"\"\"Plot the wordcloud\"\"\"\n\n # Generate a word cloud image\n plain_text = \"\".join([\" \" + i if not i.startswith(\"'\") else i for i in tokens]).strip()\n wordcloud = WordCloud(background_color=\"white\", contour_color='steelblue').generate(plain_text)\n self.ax.imshow(wordcloud, interpolation='bilinear')\n self.ax.set_xticks([], [])\n self.ax.set_yticks([], [])\n self.ax.set_xlabel(\"\")\n self.ax.set_ylabel(\"\")\n self.fig.suptitle(\"Wordcloud\")\n\n self.__fig_flag = True\n\n self.draw()\n\n def plot_accuracy(self, results, names):\n \"\"\"Make a boxplot with data from classifiers accuracy\"\"\"\n\n # boxplot algorithm comparison\n self.fig.suptitle('Acuretețea algoritmilor de clasificare')\n bp = self.ax.boxplot(results, notch=False, patch_artist=True)\n self.ax.set_xlabel('Algoritm de clasificare')\n self.ax.set_ylabel('Acuratețea rezultatelor utilizând metoda Cross-Validation')\n self.ax.set_xticklabels(names)\n\n # change outline color, fill color and linewidth of the boxes\n for box in bp['boxes']:\n # change outline color\n box.set(color='#7570b3', linewidth=2)\n 
# change fill color\n box.set(facecolor='#1b9e77')\n\n # change color and linewidth of the whiskers\n for whisker in bp['whiskers']:\n whisker.set(color='#7570b3', linewidth=2)\n\n # change color and linewidth of the caps\n for cap in bp['caps']:\n cap.set(color='#7570b3', linewidth=2)\n\n # change color and linewidth of the medians\n for median in bp['medians']:\n median.set(color='#b2df8a', linewidth=2)\n\n # change the style of fliers and their fill\n for flier in bp['fliers']:\n flier.set(marker='o', color='#e7298a', alpha=0.5)\n\n self.draw()\n\n def clear_plot(self):\n \"\"\"Clear the plot data\"\"\"\n\n if self.__fig_flag is True:\n self.fig.clf()\n self.ax = self.fig.add_subplot(111)\n\n @staticmethod\n def __map(value, left_min, left_max, right_min, right_max):\n \"\"\"Maps a value from one interval [left_min, left_max] to another [right_min, right_max]\"\"\"\n\n # Check intervals\n if right_min >= right_max:\n return right_min\n if left_min >= left_max:\n return right_min\n\n # Figure out how 'wide' each range is\n left_span = left_max - left_min\n right_span = right_max - right_min\n\n if left_span == 0:\n return 0\n\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - left_min) / float(left_span)\n\n # Convert the 0-1 range into a value in the right range.\n return right_min + (value_scaled * right_span)\n\n" }, { "alpha_fraction": 0.6037071943283081, "alphanum_fraction": 0.6051149964332581, "avg_line_length": 32.80952453613281, "blob_id": "58329e8249193c809911532f8e3def1764410b0a", "content_id": "8179408d31f183c673d59b40f40e294a6089f18a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4262, "license_type": "permissive", "max_line_length": 120, "num_lines": 126, "path": "/youtube_sentiment_analysis/modules/analysis.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Data analysis module\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nfrom __future__ import print_function\n\nimport time\n\nfrom nltk.probability import *\n\nfrom youtube_sentiment_analysis.modules.process import ProcessData\nfrom youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew\nfrom youtube_sentiment_analysis.modules.store import StoreData\nfrom youtube_sentiment_analysis.modules.training import TrainClassifier\nfrom youtube_sentiment_analysis.modules.vote_classifier import VoteClassifier\n\n# Constants\n__all__ = ['DataAnalysis']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\n\nclass DataAnalysis:\n \"\"\"Analyse the data and determine sentiment and word frequency\"\"\"\n\n def __init__(self, keyword, like_threshold_min, like_threshold_max):\n self.__keyword = keyword\n self.__like_threshold_min = like_threshold_min\n self.__like_threshold_max = like_threshold_max\n\n def get_data_from_DB(self):\n \"\"\"Get the downloaded videos data from MongoDB using store module\"\"\"\n # create a MongoDB connection\n mongo_conn = StoreData(self.__keyword, store=False)\n\n # get the data from MongoDB\n videos_data = mongo_conn.read()\n\n return videos_data\n\n def analyse(self, progress, console):\n \"\"\"Analyse data and prepare it for display module\"\"\"\n # get the starting time\n start_time = time.time()\n\n # variables\n videos = []\n likes = []\n author = []\n comm_time = []\n comments = []\n sentiment_val = []\n confidence_val = []\n 
sentiment_anew_arousal = []\n\n # get machine learning classifiers\n tc = TrainClassifier()\n classifiers = tc.get_classifiers(progress, console)\n\n # vote classifier object\n voted_classifier = VoteClassifier(classifiers)\n\n # process data object\n pd = ProcessData()\n\n # get data\n videos_data = self.get_data_from_DB()\n\n nr_videos = videos_data.count()\n\n progress_value = 0\n\n # parse data\n for video in videos_data:\n\n get_comments = video.get(\"comments\")\n nr_comments = len(get_comments)\n\n for comment in get_comments:\n # get likes\n like = float(comment.get(\"nr_likes\"))\n\n if self.__like_threshold_min <= like <= self.__like_threshold_max:\n videos.append(video.get(\"title\"))\n likes.append(like)\n author.append(comment.get(\"author\"))\n comm_time.append(comment.get(\"time\"))\n\n # get comments and apply filters\n comment_text = comment.get(\"text\")\n comments.append(comment_text)\n pd.process_text(comment_text)\n\n # machine learning algorithms sentiment value with voting system\n ml_algorithms_sentiment = voted_classifier.classify(comment_text, pd)\n sentiment_val.append(ml_algorithms_sentiment)\n\n # machine learning algorithms confidence value with voting system\n ml_algorithms_confidence = voted_classifier.confidence(comment_text, pd)\n confidence_val.append(ml_algorithms_confidence)\n\n # get ANEW arousal values\n anew_result_arousal = anew.sentiment(pd.get_tokens())['arousal']\n sentiment_anew_arousal.append(anew_result_arousal)\n\n progress_value += 80 / nr_videos / nr_comments\n progress.setValue(progress_value)\n\n if not pd.get_all_tokens():\n return\n\n # FreqDist returns a list of tuples containing each word and the number of its occurences\n fd = FreqDist(pd.get_all_tokens())\n\n # get the ending time and calculate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n console.append(\"> Data processed in \" + time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)) + \" seconds\")\n\n return fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence_val, comments, videos, author, comm_time\n\n\n" }, { "alpha_fraction": 0.6408947706222534, "alphanum_fraction": 0.6937070488929749, "avg_line_length": 65.09815979003906, "blob_id": "62c865ebb2df5d69acf80d5d311d87ffad53d2d6", "content_id": "743fa5f76fa9c4f4327aac736066e2c6ae824dd8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 11088, "license_type": "permissive", "max_line_length": 276, "num_lines": 163, "path": "/README.md", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "# Consumer perception of online multimedia\n\n## Description\nThis is a bachelor's degree application. It was created to analyze Youtube videos' metadata to create statistics about a specific key phrase search.\nIt can determine the impact of user comments on videos. It is helpful for the content creators of the Youtube platform. \n\n<img src=\"./docs/images/users_perception.png\" alt=\"Consumer perception image\" width=\"500\">\n\n## Components\nThe main components of the application are:\n1. Web Crawler\n\t- search videos on youtube based on a keyphrase\n1. Data Storage\n\t- store data to mongoDB\n1. Data Processing\n\t- preprocess data to remove noise\n1. 
Data Analysis\n\t- analyse extracted data\n\t- machine learning classifiers:\n\t\t* Multinomial Naive Bayes\n\t\t* Logistic Regression\n\t\t* Support Vector Machines\n\t- lexicon based dictionaries:\n\t\t* VADER\n\t\t* ANEW\n\t- voting system (using all classifiers)\n1. User interface\n\t- interface for user to search and view data\n1. Data display\n\t- create the graphs for displaying data\n\n## Structure\n```\nyoutube_sentiment_analysis\n├── docs\n| └── images\n| └── users_perception.png\n├── venv\n├── youtube_sentiment_analysis\n| ├── data\n| | └── classifiers\n| | | ├── logistic_regression.pickle\n| | | ├── multinomial_naive_bayes.pickle\n| | | └── nu_svc.pickle\n| ├── modules\n| | ├── __init__.py\n| | ├── accuracy.py\n| | ├── analysis.py\n| | ├── crawler.py\n| | ├── display.py\n| | ├── interface.py\n| | ├── process.py\n| | ├── store.py\n| | ├── training.py\n| | └── vote_classifier.py\n| ├── __init__.py\n| └── __main__.py\n├── LICENSE\n├── README.md\n├── requirements.txt\n└── setup.py\n```\n\n## Accuracy of the classifiers\n\n### Dataset with 10,000 files from IMDB\n| Classifier | Accuracy (%) | Standard deviation |\n| ------------------------- | ------------- | ------------------ |\n| Multinomial NB | 85.66 | 0.0065\t\t |\n| Logistic Regression | 87.30 | 0.0107 |\n| Nu SVC | 88.23 | 0.0078 |\n| VADER\t\t\t | 69.36\t | 0.1638\t\t |\n| ANEW\t\t\t | 67.54\t | 0.1313\t\t |\n| VOTING\t\t | 88.88\t | 0.0977\t\t |\n\n*Training time was obtained on a virtual machine with Debian 9.6 OS, with the specifications: \n(CPU Intel Core i7-2600 (4 cores), RAM 8GB DDR3, GPU AMD Radeon RX580)\n\n## Chart Legend\n - horizontal axis \t-> valence (from voting system)\n - vertical axis\t-> arousal (from ANEW)\n - Colour \t-> the confidence of classifiers\n - Size \t-> the number of likes\n\n## New features to implement\n - bigger dataset (more than 50000 files containing comments);\n - scatter point with details on hover;\n - on y-axis will be added activity of the user that write the comment;\n - points size will be based on their likes.\n \n## Resources\n### 1. Academic papers\n- [1] Flora Amato, Aniello Castiglione, Fabio Mercorio, Mario Mezzanzanica, Vincenzo Moscato, Antonio Picariello, Giancarlo Sperlì, „Multimedia story creation on social networks”, Elsevier, vol. 86, pp. 412-420, 2018. \n- [2] Bo Han, „Improving the Utility of Social Media with Natural Language Processing”, The University of Melbourne, PhD Thesis, pp. 1-198, 2014. \n- [3] Androniki Sapountzi, Kostas E. Psannis, „Social networking data analysis tools & challenges”, Elsevier, vol. 86, pp. 893-913, 2016. \n- [4] Ratab Gulla, Umar Shoaiba, Saba Rasheed, Washma Abid, Beenish Zahoor, „Pre Processing of Twitter's Data for Opinion Mining in Political Context”, Elsevier, vol. 96, pp. 1560-1570, 2016. \n- [5] Christopher G. Healey, Tweet Sentiment Visualization [Online], Disponibil la adresa: https://www.csc2.ncsu.edu/faculty/healey/tweet_viz/, Accesat: 2018. \n- [6] Google, Natural Language [Online], Disponibil la adresa: https://cloud.google.com/naturallanguage/, Accesat: 2019. \n- [7] Microsoft, Text Analytics [Online], Disponibil la adresa: https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/, Accesat: 2019. \n- [8] R. Piryani, D. Madhavi, V.K. Singh, „Analytical mapping of opinion mining and sentiment analysis research during 2000–2015”, Elsevier, vol. 53, pp. 122-150, 2016. \n- [9] Stuart J. Russell, Peter Norvig, „Artificial Intelligence A Modern Approach Third Edition”, Pearson Education, Inc., 2010. 
\n- [10] Diksha Khurana, Aditya Koli, Kiran Khatter and Sukhdev Singh, „Natural Language Processing: State of The Art, Current Trends and Challenges”, ResearchGate, Vol. 1, pp. 1-25, 2017. \n- [11] Christopher D. Manning, Prabhakar Raghavan, Hinrich Schütze, „An Introduction to Information Retrieval”, Cambridge University Press, 2009. \n- [12] Bing Liu, „Sentiment Analysis and Opinion Mining”, Morgan&Claypool, 2012. \n- [13] Bing Liu, „Sentiment Analysis and Subjectivity”, ResearchGate, vol. 26, pp. 627-666, 2010. \n- [14] Judith Hurwitz, Daniel Kirsch, „Machine Learning For Dummies”, John Wiley & Sons, Inc, 2018. \n- [15] Peter Jeffcock (ORACLE), What's the Difference Between AI, Machine Learning, and Deep Learning? [Online], Disponibil la adresa: https://blogs.oracle.com/bigdata/difference-ai-machinelearning-deep-learning, Accesat: 2019. \n- [16] K. Ming Leung, „Naive Bayesian Classifier”, POLYTECHNIC UNIVERSITY, University Course, pp. 1-16, 2007. \n- [17] Sona Taheri, Musa Mammadov, „Learning the Naive Bayes Classifier with optimization models”, International Journal of Applied Mathematics and Computer Science, vol. 23, pp. 787–795, 2013. \n- [18] Rennie, Jason D. M. and Shih, Lawrence and Teevan, Jaime and Karger, David R., „Tackling the Poor Assumptions of Naive Bayes Text Classiers”, AAAI Press, ICML'03, pp. 616-623, 2003. \n- [19] Harry Zhang, „The Optimality of Naive Bayes”, Proceedings of the , Seventeenth International Florida Artificial Intelligence Research Society Conference, pp. 1-6, 2004. \n- [20] Scikit Learn, Generalized Linear Models [Online], Disponibil la adresa: https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression, Accesat: 2019. \n- [21] NCSS, „Logistic Regression”, NCSS Statistical Software, Capitolul 321, pp. 1-69, 2019. \n- [22] Longjian Liu, „Biostatistical Basis of Inference in Heart Failure Study”, Longjian Liu, Heart Failure: Epidemiology and Research Methods, pp. 43-82, 2018. \n- [23] Andrew Ng, „Support Vector Machines”, Stanford, CS229: Machine Learning, pp. 1-25, 2018.\n- [24] Scikit Learn, Support Vector Machines [Online], Disponibil la adresa: https://scikit-learn.org/stable/modules/svm.html#svm-kernels, Accesat: 2019. \n- [25] Maite Taboada, „Sentiment Analysis: An Overview from Linguistics”, Simon Fraser University, Annual Review of Applied Linguistics, pp. 1-52, 2016. \n- [26] Prabu Palanisamy, Vineet Yadav, Harsha Elchuri, „Serendio: Simple and Practical lexicon based approach to Sentiment Analysis”, Association for Computational Linguistics, Second Joint Conference on Lexical and Computational Semantics, pp. 543–548, 2013. \n- [27] C.J. Hutto, Eric Gilbert, „VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text”, Proceedings of the , Eighth International AAAI Conference on Weblogs and Social Media, pp. 1-10, 2015. \n- [28] James A Russell, „A Circumplex Model of Affect”, Journal of Personality and Social Psychology, Vol. 39, pp. 1161-1178, 1980. \n- [29] Margaret M. Bradley, Peter J. Lang, „Affective Norms for English Words (ANEW): Instruction Manual and Affective Ratings”, NIMH Center for Emotion and Attention, Vol. 1, pp. 1-48, 1999. \n- [30] Amy Beth Warriner, Victor Kuperman, Marc Brysbaert, „Norms of valence, arousal, and dominance for 13,915 English lemmas”, Behavior Research Methods, Vol. 45, pp. 1191–1207, 2013. 
\n- [31] IBM, Three-tier architectures [Online], Disponibil la adresa: https://www.ibm.com/support/knowledgecenter/en/SSAW57_9.0.0/com.ibm.websphere.nd.multipla tform.doc/ae/covr_3-tier.html, Accesat: 2019. \n- [32] Carlos Castillo, „Effective Web Crawling”, University of Chile, PhD, pp. 1-164, 2004. \n- [33] Gerald Petz, Michał Karpowicz, Harald Fürschuß, Andreas Auinger, Václav Strítesky, Andreas Holzinger, „Reprint of: Computational approaches for mining user’sopinions on the Web 2.0”, Elsevier, vol. 51, pp. 510-519, 2015. \n- [34] Sam Henry, Clint Cuffy, Bridget T. McInnes, „Vector representations of multi-word terms for semantic relatedness”, Journal of Biomedical Informatics, Vol. 77, pp. 111-119, 2017. \n- [35] Gareth James, „Majority Vote Classifiers: Theory and applications”, STANFORD UNIVERSITY, PhD Thesis, pp. 1-98, 1998. \n- [36] Dave Kuhlman, „A Python Book: Beginning Python, Advanced Python and Python Exercises”, Platypus Global Media, 2013. \n- [37] Shiliang Sun, Chen Luo, Junyu Chen, „A review of natural language processing techniques for opinion mining systems”, Elsevier, vol. 36, pp. 10-25, 2016. \n- [38] Steven Bird, Ewan Klein, Edward Loper, „Natural Language Processing with Python”, O’Reilly Media, Inc., 2009. \n- [39] Fabian Pedregosa, Gaël Varoquaux, Alexandre Gramfort, Vincent Michel, Bertrand Thirion, Olivier Grisel, Mathieu Blondel, Peter Prettenhofer, Ron Weiss, Vincent Dubourg, Jake Vanderplas, Alexandre Passos, David Cournapeau, Matthieu Perrot, Édouard Duchesnay, „Scikit-learn: Machine Learning in Python”, Journal of Machine Learning Research, 12, pp. 2825-2830, 2011. \n- [40] PyQt, PyQt Reference Guide [Online], Disponibil la adresa: https://www.riverbankcomputing.com/ static/Docs/PyQt5/, Accesat: 2019. \n- [41] Maas, Andrew L. and Daly, Raymond E. and Pham, Peter T. and Huang, Dan and Ng, Andrew Y. and Potts, Christopher, „Learning Word Vectors for Sentiment Analysis”, Association for Computational Linguistics, vol. 1, pp. 142-150, 2011. \n- [42] Payam Refaeilzadeh, Lei Tang, Huan Liu, „Cross-Validation”, Springer, Encyclopedia of Database Systems, pp. 532-538, 2009. \n- [43] Aliaksei Severyn, Alessandro Moschitti, Olga Uryupina, Barbara Plank, Katja Filippova, „Multilingual opinion mining on YouTube”, Elsevier, vol. 52, pp. 46-60, 2015.\n\n### 2. GitHub\n - [youtube-comment-downloader](https://github.com/egbertbouman/youtube-comment-downloader)\n - [VADER-Sentiment-Analysis](https://github.com/cjhutto/vaderSentiment)\n \n### 3. Python Libraries\n - [VADER](https://www.nltk.org/_modules/nltk/sentiment/vader.html): Hutto, C.J. & Gilbert, E.E. (2014). VADER: A Parsimonious Rule-based Model for Sentiment Analysis of Social Media Text. Eighth International Conference on Weblogs and Social Media (ICWSM-14). Ann Arbor, MI, June 2014.\n - [NLTK](https://www.nltk.org/)\n - [SciKit-learn](https://scikit-learn.org/stable)\n - [WordCloud](https://amueller.github.io/word_cloud/)\n \n### 4. Dataset\n - [Kaggle](https://www.kaggle.com/iarunava/imdb-movie-reviews-dataset): Potts, Christopher. 2011. On the negativity of negation. In Nan Li and David Lutz, eds., Proceedings of Semantics and Linguistic Theory 20, 636-659.\n \n### 5. Tutorials\n - [Pythonprogramming - Natural language processing](https://pythonprogramming.net/tokenizing-words-sentences-nltk-tutorial/)\n - [DZone - Simple Sentiment Analysis With NLP](https://dzone.com/articles/simple-sentiment-analysis-with-nlp)\n \n## Existing products\n### 1. 
Academic\n - [Tweet Sentiment Visualization](https://www.csc2.ncsu.edu/faculty/healey/tweet_viz/tweet_app/)\n\n### 2. Commercial\n - [Google - Cloud Natural Language](https://cloud.google.com/natural-language/)\n - [Microsoft - Text Analytics API](https://docs.microsoft.com/en-us/azure/cognitive-services/text-analytics/overview) \n\n## License\nThe application is licensed under MIT License. It is made for academic use and is the subject of a bachelor's degree.\n" }, { "alpha_fraction": 0.6960985660552979, "alphanum_fraction": 0.7002053260803223, "avg_line_length": 32.620689392089844, "blob_id": "16e851c24ad55dcf69d7e1dedab1c3d36223b27b", "content_id": "002e1484357cf0d5e62a4c614ebfcd89adbec8c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 974, "license_type": "permissive", "max_line_length": 88, "num_lines": 29, "path": "/setup.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "from pip._internal.download import PipSession\nfrom pip._internal.req import parse_requirements\nfrom setuptools import setup, find_packages\n\nfrom youtube_sentiment_analysis import __version__ as version\n\nrequirements = [\n str(req.req) for req in parse_requirements('requirements.txt', session=PipSession())\n]\n\nsetup(\n name=\"youtube_sentiment_analysis\",\n version=version,\n author=\"Alexandru Grigoras\",\n author_email=\"[email protected]\",\n description=\"Youtube Sentiment Analysis\",\n url=\"https://bitbucket.org/grigorasalex/youtube_sentiment_analysis/src/master/\",\n packages=find_packages(),\n keywords='youtube search sentiment analysis',\n install_requires=requirements,\n zip_safe=True,\n classifiers=[\n 'Development Status :: 1.0 - Release',\n \"Programming Language :: Python :: 3.6\",\n \"Artificial Intelligence :: Natural Language Processing\",\n \"Crawler :: Youtube metadata crawler\",\n \"Operating System :: OS Independent\",\n ],\n)" }, { "alpha_fraction": 0.5462127923965454, "alphanum_fraction": 0.5496737360954285, "avg_line_length": 33.49119186401367, "blob_id": "6fb542ed45e0dccb7d34be434084eb5d4b695e46", "content_id": "59ad0ab88de99f0833ef16d8397e423461b591d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 17625, "license_type": "permissive", "max_line_length": 131, "num_lines": 511, "path": "/youtube_sentiment_analysis/modules/crawler.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Web crawler module\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport json\nimport multiprocessing\nimport queue\nimport time\nimport urllib.request\nfrom urllib import robotparser\n\nimport lxml.html\nimport requests\nfrom bs4 import BeautifulSoup\nfrom lxml.cssselect import CSSSelector\n\nfrom youtube_sentiment_analysis.modules.store import StoreData\n\n# Constants\n__all__ = ['WebCrawler']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\nYOUTUBE_URL_SEARCH = \"https://www.youtube.com/results?search_query=\"\nYOUTUBE_PAGE_ARG = \"&page=\"\nYOUTUBE_URL = \"https://www.youtube.com\"\nYOUTUBE_COMMENTS_URL = 'https://www.youtube.com/all_comments?v={youtube_id}'\nYOUTUBE_COMMENTS_AJAX_URL = 'https://www.youtube.com/comment_ajax'\nUSER_AGENT = 'AG_SENTIMENT_ANALYSIS_BOT'\n\n\nclass Video:\n \"\"\"Video class that contains the parameters of a video\"\"\"\n\n def 
__init__(self, title, link, description, likes=None, dislikes=None):\n \"\"\"Class constructor\"\"\"\n self.__title = title\n self.__link = link\n self.__video_id = None\n self.__description = description\n self.__likes = likes\n self.__dislikes = dislikes\n\n def set_title(self, title):\n \"\"\"Sets the title\"\"\"\n self.__title = title\n\n def set_link(self, link):\n \"\"\"Sets the link\"\"\"\n self.__link = link\n\n def set_id(self, video_id):\n \"\"\"Sets the id\"\"\"\n self.__video_id = video_id\n\n def set_description(self, description):\n \"\"\"Sets the description\"\"\"\n self.__description = description\n\n def set_likes(self, likes):\n \"\"\"Sets the likes\"\"\"\n self.__likes = likes\n\n def set_dislikes(self, dislikes):\n \"\"\"Sets the dislikes\"\"\"\n self.__dislikes = dislikes\n\n def get_title(self):\n \"\"\"Returns the title\"\"\"\n return self.__title\n\n def get_link(self):\n \"\"\"Returns the link\"\"\"\n return self.__link\n\n def get_id(self):\n \"\"\"Returns the id\"\"\"\n return self.__video_id\n\n def get_description(self):\n \"\"\"Returns the description\"\"\"\n return self.__description\n\n def get_likes(self):\n \"\"\"Returns the likes\"\"\"\n return self.__likes\n\n def get_dislikes(self):\n \"\"\"Returns the dislikes\"\"\"\n return self.__dislikes\n\n def display(self, fp=None):\n \"\"\"Displays the video data\"\"\"\n space = \" \"\n try:\n print(\"\\t> Title: \" + self.__title, file=fp)\n except IOError:\n print(space + \"> Invalid title!\", file=fp)\n try:\n print(space + \"Link: \" + self.__link, file=fp)\n except IOError:\n print(space + \"> Invalid link!\", file=fp)\n try:\n print(space + \"Description: \" + self.__description, file=fp)\n except IOError:\n print(space + \"> Invalid description!\", file=fp)\n try:\n print(space + \"Like: %s, Dislike: %s\" % (self.__likes, self.__dislikes), file=fp)\n except IOError:\n print(space + \"> No likes / dislikes!\", file=fp)\n\n\nclass RobotParser:\n \"\"\"Robot parser class to check the crawling rules on the domain and links\"\"\"\n\n def __init__(self):\n \"\"\"Class constructor\"\"\"\n self.__rp = robotparser.RobotFileParser()\n\n def parse_domain(self):\n \"\"\"Parse the domain for robot rules\"\"\"\n self.__rp.set_url(YOUTUBE_URL + \"/robots.txt\")\n self.__rp.read()\n\n r_rate = self.__rp.request_rate(\"*\")\n if r_rate is not None:\n requests_nr = r_rate.requests\n request_sec = r_rate.seconds\n #print(\"> Parameters: \")\n #print(\"\\t - request-rate: \" + str(requests_nr) + \"/\" + str(request_sec) + \"s\")\n\n # TO DO: add other parameters to test\n\n def can_extract(self, link):\n \"\"\"Checks the link to validate the crawling permission\"\"\"\n return self.__rp.can_fetch(\"*\", link)\n\n\nclass Downloader(multiprocessing.Process):\n \"\"\"Worker for downloading data from every video in the list\"\"\"\n\n def __init__(self, name, keyword, videos_list):\n multiprocessing.Process.__init__(self)\n self.__keyword = keyword\n self.__videos_list = videos_list\n self.__exception = None\n self.__mongo_conn = None\n\n print(\"> Initialize worker \" + name + \" with \" + str(len(videos_list)) + \" videos\")\n\n def run(self):\n self.__mongo_conn = StoreData(self.__keyword, store=True)\n\n try:\n # search every video for metadata\n for video in self.__videos_list:\n try:\n print(\"> Crawling \" + video.get_title())\n self.__video_process(video)\n except AttributeError:\n print(\"> Extracted data from video is invalid (AttributeError)!\")\n except IndexError:\n print(\"> Extracted data from video is invalid 
(IndexError)!\")\n except Exception as e:\n self.__exception = e\n\n def __video_process(self, video):\n \"\"\"Process every video to find links\"\"\"\n video_id_path = video.get_link()\n video_id = video_id_path.replace(\"/watch?v=\", \"\")\n url_video = YOUTUBE_URL + video_id_path\n\n rp = RobotParser()\n\n rp.parse_domain()\n\n if rp.can_extract(url_video) is False:\n print(\"> Page cannot be crawled: \" + url_video)\n return\n\n headers = {'User-Agent': USER_AGENT}\n req = urllib.request.Request(url_video, headers=headers)\n search_content = urllib.request.urlopen(req)\n search_content_html = BeautifulSoup(search_content, 'lxml')\n\n try:\n like = search_content_html.findAll('button', {\"class\": \"like-button-renderer-like-button\"})\n likes = like[0].span.text\n except IndexError:\n likes = 0\n try:\n dislike = search_content_html.findAll('button', {\"class\": \"like-button-renderer-dislike-button\"})\n dislikes = dislike[0].span.text\n except IndexError:\n dislikes = 0\n\n # create a video\n video.set_link(url_video)\n video.set_likes(likes)\n video.set_dislikes(dislikes)\n video.set_id(video_id)\n\n if video_id_path.find(\"channel\") is -1 and video_id_path.find(\"user\") is -1:\n self.__metadata_extractor(video)\n elif video_id_path.find(\"channel\") is not -1:\n print(\"> \" + video.get_title() + \" is a channel\")\n elif video_id_path.find(\"user\") is not -1:\n print(\"> \" + video.get_title() + \" is a user\")\n else:\n print(\"> \" + video.get_title() + \" is unknown\")\n\n def __metadata_extractor(self, video):\n \"\"\"Extracts the data from video\"\"\"\n count = self.__download_comments(video)\n\n print('> Downloading ' + str(count) + ' comments for video: ', video.get_title(), ' (', video.get_id(), ')')\n\n def __download_comments(self, video=None, sleep=0):\n \"\"\"Extract comments from video\"\"\"\n session = requests.Session()\n session.headers['User-Agent'] = USER_AGENT\n\n # get Youtube page with initial comments\n response = session.get(YOUTUBE_COMMENTS_URL.format(youtube_id=video.get_id()))\n html = response.text\n reply_comments = self.__extract_reply_comments(html)\n\n nr_comments = 0\n\n nr_comments += self.__extract_comments(html, video)\n\n page_token = self.__find_token(html, 'data-token')\n session_token = self.__find_token(html, 'XSRF_TOKEN', 4)\n\n first_iteration = True\n\n # get remaining comments\n while page_token:\n data = {'video_id': video.get_id(),\n 'session_token': session_token}\n\n params = {'action_load_comments': 1,\n 'order_by_time': True,\n 'filter': video.get_id()}\n\n if first_iteration:\n params['order_menu'] = True\n else:\n data['page_token'] = page_token\n\n response = self.__ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)\n if not response:\n break\n\n page_token, html = response\n\n reply_comments += self.__extract_reply_comments(html)\n nr_comments += self.__extract_comments(html, video)\n\n first_iteration = False\n time.sleep(sleep)\n\n # get replies\n for cid in reply_comments:\n data = {'comment_id': cid,\n 'video_id': video.get_id(),\n 'can_reply': 1,\n 'session_token': session_token}\n\n params = {'action_load_replies': 1,\n 'order_by_time': True,\n 'filter': video.get_id(),\n 'tab': 'inbox'}\n\n response = self.__ajax_request(session, YOUTUBE_COMMENTS_AJAX_URL, params, data)\n if not response:\n break\n\n _, html = response\n\n nr_comments += self.__extract_comments(html, video)\n\n time.sleep(sleep)\n\n return nr_comments\n\n @staticmethod\n def __extract_reply_comments(html):\n \"\"\"Get comments from 
replies\"\"\"\n tree = lxml.html.fromstring(html)\n sel = CSSSelector('.comment-replies-header > .load-comments')\n return [i.get('data-cid') for i in sel(tree)]\n\n def __extract_comments(self, html, video):\n \"\"\"Extracts comments from html using CSSSelector to find specific classes\"\"\"\n tree = lxml.html.fromstring(html)\n item_sel = CSSSelector('.comment-item')\n text_sel = CSSSelector('.comment-text-content')\n time_sel = CSSSelector('.time')\n author_sel = CSSSelector('.user-name')\n like_sel = CSSSelector('.like-count')\n\n nr_comments = 0\n\n for item in item_sel(tree):\n self.__mongo_conn.write(video,\n item.get('data-cid'),\n text_sel(item)[0].text_content(),\n time_sel(item)[0].text_content().strip(),\n author_sel(item)[0].text_content(),\n like_sel(item)[0].text_content(),\n )\n nr_comments += 1\n\n return nr_comments\n\n @staticmethod\n def __find_token(html, key, num_chars=2):\n \"\"\"Find start and end position of a key\"\"\"\n begin = html.find(key) + len(key) + num_chars\n end = html.find('\"', begin)\n\n return html[begin: end]\n\n @staticmethod\n def __ajax_request(session, url, params, data, retries=1, sleep=0):\n \"\"\"Ajax request to scroll page\"\"\"\n for _ in range(retries):\n response = session.post(url, params=params, data=data)\n if response.status_code == 200:\n response_dict = json.loads(response.text)\n return response_dict.get('page_token', None), response_dict['html_content']\n else:\n time.sleep(sleep)\n\n def get_exception(self):\n \"\"\"Returns the generated exception\"\"\"\n return self.__exception\n\n\nclass WebCrawler:\n \"\"\"Search on youtube by chosen keyword and find all videos to download comments\"\"\"\n\n def __init__(self, keyword, nr_videos, crawl_delay):\n \"\"\"Class constructor\"\"\"\n\n self.__keyword = keyword\n self.__nr_videos = nr_videos\n self.__crawl_delay = crawl_delay\n self.__videos_queue = queue.Queue(maxsize=1000)\n\n def run(self, console):\n \"\"\"Method that runs the web crawler on YouTube with the specified keyword for search\"\"\"\n\n # get the starting time\n start_time = time.time()\n\n # start the main processing based on arguments\n self.__search_pages(console)\n\n # get the finish time and calculate the script execution time\n end_time = time.time()\n elapsed_time = end_time - start_time\n console.append(\"> Data extracted in \" + time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)) + \" seconds\")\n\n return True\n\n def __search_pages(self, console):\n \"\"\"Searches into YouTube results pages to find videos\"\"\"\n\n # beginning search__get_videos\n console.append(\"> Searching for: \" + self.__keyword)\n\n # check limit of pages and videos\n if self.__nr_videos < 1:\n console.append(\"> The number of videos should be > 0\")\n elif self.__nr_videos == 1:\n console.append(\"> Limit the search to: \" + str(self.__nr_videos) + \" video\")\n else:\n console.append(\"> Limit the search to: \" + str(self.__nr_videos) + \" videos\")\n\n extracted_videos = 0\n max_nr_pages = 50\n current_page = 0\n\n # add the videos to the queue\n while extracted_videos < self.__nr_videos and current_page <= max_nr_pages:\n current_page += + 1\n url = YOUTUBE_URL_SEARCH + self.__keyword.replace(\" \", \"%20\") + YOUTUBE_PAGE_ARG + str(current_page)\n\n try:\n value = self.__get_videos(url, self.__nr_videos, console)\n if value is -1:\n console.append(\"> There isn't an internet connection! 
Please connect to the internet to get data from videos!\")\n print(\"no internet\")\n extracted_videos += value\n except Exception:\n console.append(\"> There isn't an internet connection! Please connect to the internet to get data from videos!\")\n return\n\n time.sleep(self.__crawl_delay)\n\n # get the number of threads\n nr_threads = multiprocessing.cpu_count()\n\n # page threads list\n page_processes = []\n\n # calculate the number of videos for each thread to be processed\n nr_videos_in_queue = self.__videos_queue.qsize()\n console.append(\"> Number of videos found: \" + str(nr_videos_in_queue))\n\n if nr_videos_in_queue == 0:\n print(\"> Cannot run crawling with no videos!\")\n console.append(\"> Cannot run crawling with no videos!\")\n\n # create the workers to process the videos\n if nr_videos_in_queue <= nr_threads:\n for i in range(0, nr_videos_in_queue, 1):\n videos_list = []\n if self.__videos_queue.empty() is False:\n videos_list.append(self.__videos_queue.get())\n process = Downloader(str(i), self.__keyword, videos_list)\n page_processes.append(process)\n else:\n video_per_thread = int(nr_videos_in_queue / nr_threads)\n remaining_videos = nr_videos_in_queue % nr_threads\n\n for i in range(0, nr_threads, 1):\n videos_list = []\n index = 0\n\n if remaining_videos > 0:\n total_videos = video_per_thread + 1\n remaining_videos -= 1\n else:\n total_videos = video_per_thread\n\n while self.__videos_queue.empty() is False and index < total_videos:\n videos_list.append(self.__videos_queue.get())\n index += 1\n process = Downloader(str(i), self.__keyword, videos_list)\n page_processes.append(process)\n\n # start the workers\n for process in page_processes:\n process.start()\n\n # wait for each worker to finish the processing\n for process in page_processes:\n process.join()\n\n # check if there where any errors on workers\n for t in page_processes:\n e = t.get_exception()\n if e:\n console.append(\"> Error on process:\" + e)\n\n def __get_videos(self, url, max_nr_videos=None, console=None):\n \"\"\"Finds the videos in the selected YouTube page\"\"\"\n\n # set header for request\n headers = {'User-Agent': USER_AGENT}\n req = urllib.request.Request(url, headers=headers)\n\n try:\n # send request\n try:\n search_result = urllib.request.urlopen(req)\n except urllib.error.URLError:\n print('Cannot make request')\n return -1\n \n soup = BeautifulSoup(search_result, 'lxml')\n description = soup.findAll('div', {\"class\": \"yt-lockup-description\"})\n title_link = soup.findAll('a', {\"class\": \"yt-uix-tile-link\"})\n\n # check the number of videos\n if len(title_link) == 0:\n return 0\n\n if max_nr_videos:\n selected_nr_videos = max_nr_videos\n else:\n selected_nr_videos = len(title_link)\n\n # search every video for metadata\n for video in range(0, selected_nr_videos, 1):\n try:\n # put the video in the queue\n current_video = Video(title_link[video]['title'],\n title_link[video]['href'],\n description[video].text)\n self.__videos_queue.put(current_video)\n except AttributeError:\n console.append(\"> Video cannot be put to queue (AttributeError)!\")\n except IndexError:\n console.append(\"> Video cannot be put to queue (IndexError)!\")\n\n # returns the number of the videos found\n return len(title_link)\n\n except urllib.error.HTTPError:\n console.append(\"> HTTP request error: Too many requests\")\n return 0\n" }, { "alpha_fraction": 0.49528834223747253, "alphanum_fraction": 0.4983038008213043, "avg_line_length": 29.494253158569336, "blob_id": "1a574d7985cfd0e3c50775dfe59330c25847ba61", 
"content_id": "b8243b355a07c64ec66c7a6abb9a8f4fb75eaf92", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2653, "license_type": "permissive", "max_line_length": 102, "num_lines": 87, "path": "/youtube_sentiment_analysis/modules/store.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Store data module\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport pymongo\n\n# Constants\n__all__ = ['StoreData']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\nDATABASE_NAME = \"sentiment_analysis\"\n\n\nclass StoreData:\n \"\"\"Class for storing the data to temporary files or MongoDB database\"\"\"\n\n def __init__(self, keyword=None, store=True, console=None):\n \"\"\"Class constructor\"\"\"\n\n try:\n self.__my_client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n except Exception:\n if console:\n console.append(\"> MongoDB database connection is closed or MongoDB is not installed!\")\n return\n\n self.__db_list = self.__my_client.list_database_names()\n\n if DATABASE_NAME not in self.__db_list:\n if console:\n console.append(\"> Database \" + DATABASE_NAME + \" does not exists. Creating it!\")\n\n self.__my_db = self.__my_client[\"sentiment_analysis\"]\n\n self.__col_list = self.__my_db.list_collection_names()\n if keyword:\n if keyword not in self.__col_list:\n if not store:\n if console:\n console.append(\"> Collection does not exist! Extract data first!\")\n exit()\n if console:\n console.append(\"> Collection \" + keyword + \" does not exists. Creating it!\")\n\n self.__my_col = self.__my_db[keyword]\n\n def write(self, video, cid, text, time, author, nr_likes):\n \"\"\"Write data on the database\"\"\"\n\n my_query = {\n '_id': video.get_id() if video else \"\",\n 'title': video.get_title() if video else \"\",\n 'description': video.get_description() if video else \"\",\n 'nr_likes': video.get_likes() if video else \"\",\n 'nr_dislikes': video.get_dislikes() if video else \"\",\n }\n new_values = {\n \"$addToSet\":\n {\n 'comments':\n {\n 'cid': cid,\n 'text': text,\n 'time': time,\n 'author': author,\n 'nr_likes': nr_likes,\n }\n }\n }\n self.__my_col.update_one(my_query, new_values, upsert=True)\n\n def read(self):\n \"\"\"Read data from mongodb database\"\"\"\n\n return self.__my_col.find()\n\n def get_collections(self):\n \"\"\"Get the collections from database\"\"\"\n\n return self.__col_list\n" }, { "alpha_fraction": 0.660516619682312, "alphanum_fraction": 0.6678966879844666, "avg_line_length": 19.846153259277344, "blob_id": "79d6429422260306ffa490d91bfdc0a70e2fb2dc", "content_id": "05c5404428c584258503f71ce463587052e8cd63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 542, "license_type": "permissive", "max_line_length": 85, "num_lines": 26, "path": "/youtube_sentiment_analysis/__main__.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Youtube Sentiment Analysis - Main module\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication\n\nfrom youtube_sentiment_analysis.modules.interface import SentimentAnalysisApplication\n\n# Constants\n__all__ = []\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n\nif __name__ == \"__main__\":\n \"\"\"Main 
function that starts the application\"\"\"\n\n app = QApplication(sys.argv)\n window = SentimentAnalysisApplication()\n window.show()\n sys.exit(app.exec_())\n" }, { "alpha_fraction": 0.5947105288505554, "alphanum_fraction": 0.6000714898109436, "avg_line_length": 30.426965713500977, "blob_id": "6d59d1ec6d5e511b5d3ce2f8ea521865f08de67f", "content_id": "7ebae5aaec02e5dd52b8a7ffab3c6a4cf931d3c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2798, "license_type": "permissive", "max_line_length": 107, "num_lines": 89, "path": "/youtube_sentiment_analysis/modules/vote_classifier.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Voting system for classifiers\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nfrom statistics import mean\nfrom statistics import mode\nfrom nltk.sentiment import SentimentIntensityAnalyzer\nfrom youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew\n\n# Constants\n__all__ = ['VoteClassifier']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\n\nclass VoteClassifier:\n \"\"\"Voting system for classifiers for selecting the most modules sentiment from a list on classifiers\"\"\"\n\n def __init__(self, classifiers):\n \"\"\"Class constructor\"\"\"\n self.__classifiers = classifiers # ml classifiers\n self.__sid = SentimentIntensityAnalyzer() # vader classifier\n\n def classify(self, comment_text, pd):\n \"\"\"Returns the mean value of the classifiers results\"\"\"\n votes = []\n\n # get ML classifiers results\n for c in self.__classifiers:\n pos = c.prob_classify(pd.get_word_feature()).prob('pos')\n neg = c.prob_classify(pd.get_word_feature()).prob('neg')\n votes.append(float(pos - neg))\n\n # get Vader result\n ss = self.__sid.polarity_scores(comment_text)\n votes.append(ss[\"compound\"])\n\n # get ANEW result\n anew_result = anew.sentiment(pd.get_tokens())['valence']\n votes.append(self.map(anew_result, 0, 10, -1, 1))\n\n return mean(votes)\n\n def confidence(self, comment_text, pd):\n \"\"\"Returns the confidence of the result\"\"\"\n votes = []\n\n # get ML classifiers result\n for c in self.__classifiers:\n v = c.classify(pd.get_word_feature())\n votes.append(v)\n\n # get Vader result\n ss = self.__sid.polarity_scores(comment_text)\n if ss[\"compound\"] >= 0:\n votes.append(\"pos\")\n else:\n votes.append(\"neg\")\n\n # get ANEW result\n anew_result = anew.sentiment(pd.get_tokens())['valence']\n if anew_result >= 5.8:\n votes.append(\"pos\")\n else:\n votes.append(\"neg\")\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / float(len(votes))\n\n return conf\n\n @staticmethod\n def map(value, left_min, left_max, right_min, right_max):\n \"\"\"Maps a value from one interval [left_min, left_max] to another [right_min, right_max]\"\"\"\n # Figure out how 'wide' each range is\n left_span = left_max - left_min\n right_span = right_max - right_min\n\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - left_min) / float(left_span)\n\n # Convert the 0-1 range into a value in the right range.\n return right_min + (value_scaled * right_span)\n\n" }, { "alpha_fraction": 0.5482308864593506, "alphanum_fraction": 0.5512104034423828, "avg_line_length": 28.505495071411133, "blob_id": "43991443317c73459dbf3a4f9a77fb64d5fd70a2", "content_id": "00ff0f3892d60c63c9bd472a13dddb5e0451ed5d", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2685, "license_type": "permissive", "max_line_length": 105, "num_lines": 91, "path": "/youtube_sentiment_analysis/modules/process.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Process data module\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport re\nfrom nltk.corpus import stopwords\n\n# Constants\n__all__ = ['ProcessData']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\n\nclass ProcessData:\n \"\"\"Class for pre-processing the the for the analysis module\"\"\"\n\n def __init__(self):\n \"\"\"Class constructor\"\"\"\n\n self.__all_tokens = []\n self.__tokens = []\n\n def process_text(self, text):\n \"\"\"Process the text by filtering it and removing unwanted characters\"\"\"\n\n self.__tokens.clear()\n\n tokenize = [t.lower().strip(\":,.!?\") for t in text.split()]\n\n filtered_tokens = self.__filter_text(tokenize)\n\n self.__tokens.extend(filtered_tokens)\n self.__all_tokens.extend(filtered_tokens)\n\n @staticmethod\n def __filter_text(tokens):\n \"\"\"Pre-process comments to remove irrelevant data\n Takes in a string of text, then performs the following:\n 1. Remove all punctuation\n 2. Remove all stopwords\n 3. Remove other characters\n 4. Return the cleaned text as a list of words\"\"\"\n\n stopwords_english = stopwords.words('english')\n custom_stopwords = []\n\n hashtags = [w for w in tokens if w.startswith('#')]\n ghashtags = [w for w in tokens if w.startswith('+')]\n mentions = [w for w in tokens if w.startswith('@')]\n links = [w for w in tokens if w.startswith('http') or w.startswith('www')]\n filtered_tokens = [w for w in tokens\n if w not in stopwords_english\n and w not in custom_stopwords\n and w.isalpha()\n and not len(w) < 3\n and w not in hashtags\n and w not in ghashtags\n and w not in links\n and w not in mentions]\n\n return filtered_tokens\n\n @staticmethod\n def __word_verify(word):\n \"\"\"Check if the word contains only letters\"\"\"\n\n if re.match(\"^[a-zA-Z_]*$\", word):\n return word.lower()\n else:\n return ''\n\n def get_tokens(self):\n \"\"\"Returns the filtered tokens of current process\"\"\"\n\n return self.__tokens\n\n def get_all_tokens(self):\n \"\"\"Returns all the filtered tokens\"\"\"\n\n return self.__all_tokens\n\n def get_word_feature(self, tokens=None):\n \"\"\"Get the word features from dictionary\"\"\"\n \n return dict([(self.__word_verify(word), True) for word in (tokens if tokens else self.__tokens)])\n" }, { "alpha_fraction": 0.6385079026222229, "alphanum_fraction": 0.6628990769386292, "avg_line_length": 50.73604202270508, "blob_id": "dcc347e0c9f93d041ddc9df137db3adcbd6cd427", "content_id": "e066b65ab87473be6a991f1e4c2005c65d0cee99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50961, "license_type": "permissive", "max_line_length": 120, "num_lines": 985, "path": "/youtube_sentiment_analysis/modules/interface.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n User interface\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5.QtGui import QStandardItemModel, QStandardItem\nfrom PyQt5.QtWidgets import 
QMainWindow, QMessageBox, QAbstractItemView\nfrom pymongo.errors import PyMongoError\n\nfrom youtube_sentiment_analysis.modules.accuracy import TestAccuracy\nfrom youtube_sentiment_analysis.modules.analysis import DataAnalysis\nfrom youtube_sentiment_analysis.modules.crawler import WebCrawler\nfrom youtube_sentiment_analysis.modules.display import DisplayData\nfrom youtube_sentiment_analysis.modules.store import StoreData\nfrom youtube_sentiment_analysis.modules.training import TrainClassifier\n\n# Constants\n__all__ = ['SentimentAnalysisApplication']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\n\nclass UIMainWindow(object):\n \"\"\"Main User Interface\"\"\"\n\n def setupUi(self, MainWindow):\n \"\"\"Setup the objects for ui\"\"\"\n\n # main window\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1366, 768)\n MainWindow.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)\n MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)\n\n #central widget\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.tabWidgetData = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidgetData.setGeometry(QtCore.QRect(390, 20, 951, 691))\n self.tabWidgetData.setObjectName(\"tabWidgetData\")\n\n # sentiment page\n self.sentiment = QtWidgets.QWidget()\n self.sentiment.setObjectName(\"sentiment\")\n self.tabWidgetData.addTab(self.sentiment, \"\")\n\n # heatmap page\n self.heatmap = QtWidgets.QWidget()\n self.heatmap.setObjectName(\"heatmap\")\n self.tabWidgetData.addTab(self.heatmap, \"\")\n\n # word frequency page\n self.word_frequency = QtWidgets.QWidget()\n self.word_frequency.setObjectName(\"word_frequency\")\n self.tabWidgetData.addTab(self.word_frequency, \"\")\n\n # word cloud page\n self.word_cloud = QtWidgets.QWidget()\n self.word_cloud.setObjectName(\"word_cloud\")\n self.tabWidgetData.addTab(self.word_cloud, \"\")\n\n # comments page\n self.comments = QtWidgets.QWidget()\n self.comments.setObjectName(\"comments\")\n self.tabWidgetData.addTab(self.comments, \"\")\n self.treeView = QtWidgets.QTreeView(self.comments)\n self.treeView.setGeometry(QtCore.QRect(30, 20, 891, 611))\n self.treeView.setObjectName(\"treeView\")\n\n # accuracy page\n self.accuracy = QtWidgets.QWidget()\n self.accuracy.setObjectName(\"accuracy\")\n self.tabWidgetData.addTab(self.accuracy, \"\")\n\n # settings page\n self.settings = QtWidgets.QWidget()\n self.settings.setObjectName(\"settings\")\n self.groupBoxComments = QtWidgets.QGroupBox(self.settings)\n self.groupBoxComments.setGeometry(QtCore.QRect(30, 50, 331, 151))\n self.groupBoxComments.setObjectName(\"groupBoxComments\")\n self.labelLikeMin = QtWidgets.QLabel(self.groupBoxComments)\n self.labelLikeMin.setGeometry(QtCore.QRect(30, 50, 171, 31))\n self.labelLikeMin.setObjectName(\"labelLikeMin\")\n self.labelLikeMax = QtWidgets.QLabel(self.groupBoxComments)\n self.labelLikeMax.setGeometry(QtCore.QRect(30, 90, 171, 31))\n self.labelLikeMax.setObjectName(\"labelLikeMax\")\n self.lineEditLikeMin = QtWidgets.QLineEdit(self.groupBoxComments)\n self.lineEditLikeMin.setGeometry(QtCore.QRect(230, 50, 81, 28))\n self.lineEditLikeMin.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.lineEditLikeMin.setObjectName(\"lineEditLikeMin\")\n self.lineEditLikeMax = QtWidgets.QLineEdit(self.groupBoxComments)\n self.lineEditLikeMax.setGeometry(QtCore.QRect(230, 90, 81, 28))\n self.lineEditLikeMax.setObjectName(\"lineEditLikeMax\")\n self.groupBoxTraining = 
QtWidgets.QGroupBox(self.settings)\n self.groupBoxTraining.setGeometry(QtCore.QRect(30, 240, 601, 321))\n self.groupBoxTraining.setObjectName(\"groupBoxTraining\")\n self.labelDataset = QtWidgets.QLabel(self.groupBoxTraining)\n self.labelDataset.setGeometry(QtCore.QRect(30, 50, 111, 31))\n self.labelDataset.setObjectName(\"labelDataset\")\n self.lineEditDataset = QtWidgets.QLineEdit(self.groupBoxTraining)\n self.lineEditDataset.setGeometry(QtCore.QRect(150, 50, 421, 28))\n self.lineEditDataset.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.lineEditDataset.setText(\"\")\n self.lineEditDataset.setObjectName(\"lineEditDataset\")\n self.pushButtonTrain = QtWidgets.QPushButton(self.groupBoxTraining)\n self.pushButtonTrain.setGeometry(QtCore.QRect(30, 160, 101, 31))\n self.pushButtonTrain.setObjectName(\"pushButtonTrain\")\n self.pushButtonTrain.clicked.connect(self.__on_click_train)\n self.labelDatasetLimit = QtWidgets.QLabel(self.groupBoxTraining)\n self.labelDatasetLimit.setGeometry(QtCore.QRect(30, 100, 111, 31))\n self.labelDatasetLimit.setObjectName(\"labelDatasetLimit\")\n self.lineEditDatasetLimit = QtWidgets.QLineEdit(self.groupBoxTraining)\n self.lineEditDatasetLimit.setGeometry(QtCore.QRect(150, 100, 131, 28))\n self.lineEditDatasetLimit.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.lineEditDatasetLimit.setText(\"200\")\n self.lineEditDatasetLimit.setObjectName(\"lineEditDatasetLimit\")\n self.lineEditDatasetKfold = QtWidgets.QLineEdit(self.groupBoxTraining)\n self.lineEditDatasetKfold.setGeometry(QtCore.QRect(150, 210, 131, 28))\n self.lineEditDatasetKfold.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.lineEditDatasetKfold.setText(\"10\")\n self.lineEditDatasetKfold.setObjectName(\"lineEditDatasetKfold\")\n self.labelDatasetKfold = QtWidgets.QLabel(self.groupBoxTraining)\n self.labelDatasetKfold.setGeometry(QtCore.QRect(30, 210, 111, 31))\n self.labelDatasetKfold.setObjectName(\"labelDatasetKfold\")\n self.pushButtonAccuracy = QtWidgets.QPushButton(self.groupBoxTraining)\n self.pushButtonAccuracy.setGeometry(QtCore.QRect(30, 260, 101, 31))\n self.pushButtonAccuracy.setObjectName(\"pushButtonAccuracy\")\n self.pushButtonAccuracy.clicked.connect(self.__on_click_accuracy)\n self.tabWidgetData.addTab(self.settings, \"\")\n\n # group box search\n self.groupBoxSearch = QtWidgets.QGroupBox(self.centralwidget)\n self.groupBoxSearch.setGeometry(QtCore.QRect(20, 20, 351, 101))\n self.groupBoxSearch.setObjectName(\"groupBoxSearch\")\n self.lineEditSearch = QtWidgets.QLineEdit(self.groupBoxSearch)\n self.lineEditSearch.setGeometry(QtCore.QRect(20, 40, 311, 41))\n self.lineEditSearch.setObjectName(\"lineEditSearch\")\n self.groupBoxAnalyse = QtWidgets.QGroupBox(self.centralwidget)\n self.groupBoxAnalyse.setGeometry(QtCore.QRect(20, 240, 351, 151))\n self.groupBoxAnalyse.setObjectName(\"groupBoxAnalyse\")\n\n # group box data\n self.groupBoxData = QtWidgets.QGroupBox(self.centralwidget)\n self.groupBoxData.setGeometry(QtCore.QRect(20, 130, 351, 101))\n self.groupBoxData.setObjectName(\"groupBoxData\")\n self.comboBoxDB = QtWidgets.QComboBox(self.groupBoxData)\n self.comboBoxDB.setGeometry(QtCore.QRect(70, 40, 261, 41))\n self.comboBoxDB.setAcceptDrops(False)\n self.comboBoxDB.setObjectName(\"comboBoxDB\")\n self.pushButtonLoad = QtWidgets.QPushButton(self.groupBoxData)\n self.pushButtonLoad.setGeometry(QtCore.QRect(20, 40, 41, 41))\n self.pushButtonLoad.setObjectName(\"pushButtonLoad\")\n self.pushButtonLoad.clicked.connect(self.__on_click_load)\n\n # button extract\n self.pushButtonExtract 
= QtWidgets.QPushButton(self.groupBoxAnalyse)\n self.pushButtonExtract.setGeometry(QtCore.QRect(130, 40, 91, 61))\n self.pushButtonExtract.setObjectName(\"pushButtonExtract\")\n self.pushButtonExtract.clicked.connect(self.__on_click_extract)\n\n # button analyse\n self.pushButtonAnalyse = QtWidgets.QPushButton(self.groupBoxAnalyse)\n self.pushButtonAnalyse.setGeometry(QtCore.QRect(240, 40, 91, 61))\n self.pushButtonAnalyse.setObjectName(\"pushButtonAnalyse\")\n self.pushButtonAnalyse.clicked.connect(self.__on_click_analyse)\n self.pushButtonAnalyse.setEnabled(False)\n\n # number of videos\n self.lineEditNrVideos = QtWidgets.QLineEdit(self.groupBoxAnalyse)\n self.lineEditNrVideos.setGeometry(QtCore.QRect(20, 70, 91, 28))\n self.lineEditNrVideos.setLayoutDirection(QtCore.Qt.RightToLeft)\n self.lineEditNrVideos.setText(\"\")\n self.lineEditNrVideos.setObjectName(\"lineEditNrVideos\")\n self.labelNrVideos1 = QtWidgets.QLabel(self.groupBoxAnalyse)\n self.labelNrVideos1.setGeometry(QtCore.QRect(30, 30, 71, 21))\n self.labelNrVideos1.setObjectName(\"labelNrVideos1\")\n self.labelNrVideos2 = QtWidgets.QLabel(self.groupBoxAnalyse)\n self.labelNrVideos2.setGeometry(QtCore.QRect(40, 50, 51, 21))\n self.labelNrVideos2.setObjectName(\"labelNrVideos2\")\n\n # progress bar\n self.progressBar = QtWidgets.QProgressBar(self.groupBoxAnalyse)\n self.progressBar.setGeometry(QtCore.QRect(20, 110, 311, 21))\n self.progressBar.setProperty(\"value\", 0)\n self.progressBar.setObjectName(\"progressBar\")\n\n # console\n self.groupBoxConsole = QtWidgets.QGroupBox(self.centralwidget)\n self.groupBoxConsole.setGeometry(QtCore.QRect(20, 410, 351, 301))\n self.groupBoxConsole.setObjectName(\"groupBoxConsole\")\n self.textEditConsole = QtWidgets.QTextEdit(self.groupBoxConsole)\n self.textEditConsole.setGeometry(QtCore.QRect(20, 40, 311, 241))\n self.textEditConsole.setObjectName(\"textEditConsole\")\n\n MainWindow.setCentralWidget(self.centralwidget)\n\n # menu bar\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1366, 25))\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuHelp = QtWidgets.QMenu(self.menubar)\n self.menuHelp.setObjectName(\"menuHelp\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.actionExit = QtWidgets.QAction(MainWindow)\n self.actionExit.setObjectName(\"actionExit\")\n self.actionExit.setStatusTip('Leave The App')\n self.actionExit.triggered.connect(self.__close_application)\n self.actionAbout = QtWidgets.QAction(MainWindow)\n self.actionAbout.setObjectName(\"actionAbout\")\n self.actionAbout.setStatusTip('Informations about the app')\n self.actionAbout.triggered.connect(self.__about_application)\n self.menuFile.addAction(self.actionExit)\n self.menuHelp.addAction(self.actionAbout)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuHelp.menuAction())\n\n # display data module\n width_val = 9.5\n height_val = 6.5\n self.sentiment_display = DisplayData(self.sentiment, width=width_val, height=height_val)\n self.heatmap_display = DisplayData(self.heatmap, width=width_val, height=height_val)\n self.word_frequency_display = DisplayData(self.word_frequency, width=width_val, height=height_val)\n self.word_cloud_display = DisplayData(self.word_cloud, width=width_val, height=height_val)\n 
self.accuracy_display = DisplayData(self.accuracy, width=width_val, height=height_val)\n\n self.__retranslateUi(MainWindow)\n self.tabWidgetData.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def __retranslateUi(self, MainWindow):\n \"\"\"Sets the label names and other paramenters\"\"\"\n\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"YouTube Sentiment Analysis\"))\n\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.sentiment), _translate(\"MainWindow\", \"Sentiment\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.heatmap), _translate(\"MainWindow\", \"Heatmap\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.word_frequency),\n _translate(\"MainWindow\", \"Word Frequency\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.word_cloud),\n _translate(\"MainWindow\", \"WordCloud\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.comments), _translate(\"MainWindow\", \"Comments\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.accuracy), _translate(\"MainWindow\", \"Accuracy\"))\n self.groupBoxComments.setTitle(_translate(\"MainWindow\", \"Comments\"))\n self.labelLikeMin.setText(_translate(\"MainWindow\", \"Minimum number of likes:\"))\n self.labelLikeMax.setText(_translate(\"MainWindow\", \"Maximum number of likes:\"))\n self.lineEditLikeMin.setText(_translate(\"MainWindow\", \"10\"))\n self.lineEditLikeMax.setText(_translate(\"MainWindow\", \"1000\"))\n self.lineEditDataset.setText(_translate(\"MainWindow\", \"/home/alex/imdb_data/\"))\n self.groupBoxTraining.setTitle(_translate(\"MainWindow\", \"Training and Accuracy\"))\n self.labelDataset.setText(_translate(\"MainWindow\", \"Dataset path:\"))\n self.pushButtonTrain.setText(_translate(\"MainWindow\", \"Train\"))\n self.labelDatasetLimit.setText(_translate(\"MainWindow\", \"Limit documents:\"))\n self.labelDatasetKfold.setText(_translate(\"MainWindow\", \"k-folds:\"))\n self.pushButtonAccuracy.setText(_translate(\"MainWindow\", \"Accuracy\"))\n self.groupBoxSearch.setTitle(_translate(\"MainWindow\", \"Search Data\"))\n self.groupBoxAnalyse.setTitle(_translate(\"MainWindow\", \"Analyse Data\"))\n self.pushButtonExtract.setText(_translate(\"MainWindow\", \"Extract\"))\n self.pushButtonAnalyse.setText(_translate(\"MainWindow\", \"Analyse\"))\n self.pushButtonAccuracy.setText(_translate(\"MainWindow\", \"Acccuracy\"))\n self.groupBoxConsole.setTitle(_translate(\"MainWindow\", \"Console\"))\n self.tabWidgetData.setTabText(self.tabWidgetData.indexOf(self.settings), _translate(\"MainWindow\", \"Settings\"))\n self.labelNrVideos1.setText(_translate(\"MainWindow\", \"Number of\"))\n self.labelNrVideos2.setText(_translate(\"MainWindow\", \"videos:\"))\n self.pushButtonLoad.setText(_translate(\"MainWindow\", \"Load\"))\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.menuHelp.setTitle(_translate(\"MainWindow\", \"Help\"))\n self.actionExit.setText(_translate(\"MainWindow\", \"Exit\"))\n self.groupBoxData.setTitle(_translate(\"MainWindow\", \"Data from database\"))\n self.actionAbout.setText(_translate(\"MainWindow\", \"About\"))\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n 
palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)\n brush = 
QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n 
brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)\n self.pushButtonAnalyse.setPalette(palette)\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n 
brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)\n self.comboBoxDB.setPalette(palette)\n palette = QtGui.QPalette()\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.BrightText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n 
brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Active, QtGui.QPalette.ToolTipText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.BrightText, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(184, 207, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n 
palette.setBrush(QtGui.QPalette.Inactive, QtGui.QPalette.ToolTipText, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.WindowText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Button, brush)\n brush = QtGui.QBrush(QtGui.QColor(196, 225, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Light, brush)\n brush = QtGui.QBrush(QtGui.QColor(155, 192, 231))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Midlight, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Dark, brush)\n brush = QtGui.QBrush(QtGui.QColor(76, 106, 138))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Mid, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Text, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.BrightText, brush)\n brush = QtGui.QBrush(QtGui.QColor(57, 79, 103))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ButtonText, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Base, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Window, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.Shadow, brush)\n brush = QtGui.QBrush(QtGui.QColor(114, 159, 207))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.AlternateBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 220))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipBase, brush)\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))\n brush.setStyle(QtCore.Qt.SolidPattern)\n palette.setBrush(QtGui.QPalette.Disabled, QtGui.QPalette.ToolTipText, brush)\n self.pushButtonLoad.setPalette(palette)\n\n @staticmethod\n def __about_application():\n \"\"\"Open a popup with application details\"\"\"\n\n message_box = QMessageBox()\n message_box.about(message_box, \"About\", \"Youtube Sentiment Analysis Application\\n\"\n \"Degree project\\n\"\n \"Author: Alexandru Grigoras\")\n\n @staticmethod\n def __close_application():\n \"\"\"Close the application\"\"\"\n\n sys.exit()\n\n def __on_click_load(self):\n \"\"\"Load the collection names\"\"\"\n\n # get data\n sd = StoreData()\n try:\n collections = sd.get_collections()\n\n collections.sort()\n\n self.comboBoxDB.clear()\n self.comboBoxDB.addItems(collections)\n\n self.pushButtonAnalyse.setEnabled(True)\n except PyMongoError:\n self.textEditConsole.append(\"> Database server is not opened!\")\n\n @pyqtSlot(name=\"extract\")\n def __on_click_extract(self):\n \"\"\"Extracts the data from YouTube\"\"\"\n\n input_text = self.lineEditSearch.text()\n\n if input_text is not 
None:\n if input_text is not \"\":\n # get like threshold\n try:\n like_threshold_min = int(self.lineEditLikeMin.text())\n\n if like_threshold_min < 0:\n self.textEditConsole.append(\"> The minimum number of likes should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The minimum number of likes is not valid\")\n return\n\n try:\n like_threshold_max = int(self.lineEditLikeMax.text())\n\n if like_threshold_max < 0:\n self.textEditConsole.append(\"> The maximum number of likes should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The maximum number of likes is not valid\")\n return\n\n try:\n nr_videos = int(self.lineEditNrVideos.text())\n\n if nr_videos < 0:\n self.textEditConsole.append(\"> The number of videos should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The number of videos is not valid\")\n return\n\n # extract data\n crawl_delay = 1\n extracted_data = WebCrawler(input_text, nr_videos, crawl_delay)\n crawling_result = extracted_data.run(self.textEditConsole)\n\n if crawling_result is True:\n # process data\n data = DataAnalysis(input_text, like_threshold_min, like_threshold_max)\n try:\n fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence, comments, videos, author, \\\n comm_time = data.analyse(self.progressBar, self.textEditConsole)\n except TypeError:\n self.textEditConsole.append(\"> No data found or like threshold is too big!\")\n return\n\n # clear any plot if it exists\n self.sentiment_display.clear_plot()\n self.heatmap_display.clear_plot()\n self.word_frequency_display.clear_plot()\n self.word_cloud_display.clear_plot()\n\n progress_value = 80\n\n # plot data\n self.sentiment_display.plot_classifiers(sentiment_val, sentiment_anew_arousal, likes, confidence,\n 'Sentiment', 'Activare', comments, videos, author,\n comm_time)\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.heatmap_display.plot_heatmap(sentiment_val, sentiment_anew_arousal, \"Sentiment\", \"Activare\")\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.word_frequency_display.plot_word_frequency(fd.items())\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.word_cloud_display.plot_wordcloud(pd.get_all_tokens())\n progress_value += 5\n self.progressBar.setValue(progress_value)\n\n # put comments to treeview\n self.__populate_treeview(data)\n else:\n self.textEditConsole.append(\"> The input text is empty!\")\n else:\n self.textEditConsole.append(\"> Invalid input text!\")\n\n @pyqtSlot(name=\"analyse\")\n def __on_click_analyse(self):\n \"\"\"Analyses Data\"\"\"\n\n input_text = self.comboBoxDB.currentText()\n\n if input_text is not None:\n if input_text is not \"\":\n\n try:\n like_threshold_min = int(self.lineEditLikeMin.text())\n\n if like_threshold_min < 0:\n self.textEditConsole.append(\"> The minimum number of likes should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The minimum number of likes is not valid\")\n return\n\n try:\n like_threshold_max = int(self.lineEditLikeMax.text())\n\n if like_threshold_max < 0:\n self.textEditConsole.append(\"> The maximum number of likes should be positive\")\n return\n\n if like_threshold_max <= like_threshold_min:\n self.textEditConsole.append(\"> The maximum number of likes should greater than \"\n \"the minimum number of likes\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The maximum number of likes is not valid\")\n 
return\n\n # process data\n data = DataAnalysis(input_text, like_threshold_min, like_threshold_max)\n try:\n fd, pd, sentiment_val, sentiment_anew_arousal, likes, confidence, comments, videos, author, \\\n comm_time = data.analyse(self.progressBar, self.textEditConsole)\n except TypeError:\n self.textEditConsole.append(\"> No data found or like threshold is too large\")\n return\n\n # clear any plot if it exists\n self.sentiment_display.clear_plot()\n self.heatmap_display.clear_plot()\n self.word_frequency_display.clear_plot()\n self.word_cloud_display.clear_plot()\n\n progress_value = 80\n\n # plot data\n self.sentiment_display.plot_classifiers(sentiment_val, sentiment_anew_arousal, likes, confidence,\n 'Sentiment', 'Activare', comments, videos, author, comm_time)\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.heatmap_display.plot_heatmap(sentiment_val, sentiment_anew_arousal, \"Sentiment\", \"Activare\")\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.word_frequency_display.plot_word_frequency(fd.items())\n progress_value += 5\n self.progressBar.setValue(progress_value)\n self.word_cloud_display.plot_wordcloud(pd.get_all_tokens())\n progress_value += 5\n self.progressBar.setValue(progress_value)\n\n # put comments to treeview\n self.__populate_treeview(data)\n else:\n self.textEditConsole.append(\"> The input text is empty!\")\n else:\n self.textEditConsole.append(\"> Invalid input text!\")\n\n @pyqtSlot(name=\"train\")\n def __on_click_train(self):\n \"\"\"Train the classifiers\"\"\"\n\n try:\n max_nr_docs = int(self.lineEditDatasetLimit.text())\n\n if max_nr_docs < 1:\n self.textEditConsole.append(\"> The maximum number of documents should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The maximum number of documents is not valid\")\n return\n\n dataset_path = self.lineEditDataset.text()\n\n train_classifier = TrainClassifier(dataset_path, max_nr_docs)\n train_classifier.train(self.progressBar, self.textEditConsole)\n\n @pyqtSlot(name=\"accuracy\")\n def __on_click_accuracy(self):\n \"\"\"Test the accuracy of the classifiers\"\"\"\n\n try:\n max_nr_docs = int(self.lineEditDatasetLimit.text())\n\n if max_nr_docs < 1:\n self.textEditConsole.append(\"> The maximum number of documents should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> The maximum number of documents is not valid\")\n return\n\n try:\n k_fold = int(self.lineEditDatasetKfold.text())\n\n if k_fold < 1:\n self.textEditConsole.append(\"> k should be positive\")\n return\n\n except ValueError:\n self.textEditConsole.append(\"> k number is not valid\")\n return\n\n dataset_path = self.lineEditDataset.text()\n\n # get data\n test_accuracy = TestAccuracy(dataset_path, max_nr_docs)\n results, names = test_accuracy.test_cross_val_score(k_fold, self.progressBar, self.textEditConsole)\n\n # clear any plot if it exists\n self.sentiment_display.clear_plot()\n\n # display data\n self.accuracy_display.plot_accuracy(results, names)\n self.tabWidgetData.setCurrentIndex(5)\n\n def __populate_treeview(self, data):\n \"\"\"Populate the comments tab\"\"\"\n\n # get data\n videos_data = data.get_data_from_DB()\n\n self.treeView.setSelectionBehavior(QAbstractItemView.SelectRows)\n model = QStandardItemModel()\n model.setHorizontalHeaderLabels(['element', 'value'])\n self.treeView.setModel(model)\n\n # parse data\n for video in videos_data:\n\n parent_elem = QStandardItem('video')\n parent_value = 
QStandardItem(video.get('title'))\n\n id_elem = QStandardItem('_id')\n id_value = QStandardItem(video.get('_id'))\n parent_elem.appendRow([id_elem, id_value])\n\n description_elem = QStandardItem('description')\n description_value = QStandardItem(video.get('description'))\n parent_elem.appendRow([description_elem, description_value])\n\n nr_likes_elem = QStandardItem('nr_likes')\n nr_likes_value = QStandardItem(video.get('nr_likes'))\n parent_elem.appendRow([nr_likes_elem, nr_likes_value])\n\n nr_dislikes_elem = QStandardItem('nr_dislikes')\n nr_dislikes_value = QStandardItem(video.get('nr_dislikes'))\n parent_elem.appendRow([nr_dislikes_elem, nr_dislikes_value])\n\n comments_elem = QStandardItem('comments')\n parent_elem.appendRow(comments_elem)\n\n comments = video.get(\"comments\")\n\n for comment in comments:\n text_elem = QStandardItem('text')\n text_value = QStandardItem(comment.get('text'))\n comments_elem.appendRow([text_elem, text_value])\n\n cid_elem = QStandardItem('cid')\n cid_value = QStandardItem(comment.get('cid'))\n text_elem.appendRow([cid_elem, cid_value])\n\n time_elem = QStandardItem('time')\n time_value = QStandardItem(comment.get('time'))\n text_elem.appendRow([time_elem, time_value])\n\n author_elem = QStandardItem('author')\n author_value = QStandardItem(comment.get('author'))\n text_elem.appendRow([author_elem, author_value])\n\n nr_likes_elem = QStandardItem('nr_likes')\n nr_likes_value = QStandardItem(comment.get('nr_likes'))\n text_elem.appendRow([nr_likes_elem, nr_likes_value])\n\n model.appendRow([parent_elem, parent_value])\n\n\nclass SentimentAnalysisApplication(QMainWindow, UIMainWindow):\n \"\"\"Main application -> initialises User Interface\"\"\"\n\n def __init__(self):\n \"\"\"Class constructor\"\"\"\n\n QMainWindow.__init__(self, flags=QtCore.Qt.Window)\n UIMainWindow.__init__(self)\n self.setupUi(self)\n\n" }, { "alpha_fraction": 0.5502990484237671, "alphanum_fraction": 0.5592169761657715, "avg_line_length": 36.53061294555664, "blob_id": "adefe4390230b05315bb69fe6cb2f9c5a2fcfbb2", "content_id": "a1459b34e37873684e7dbdf058a7e6ca336aa7f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9195, "license_type": "permissive", "max_line_length": 119, "num_lines": 245, "path": "/youtube_sentiment_analysis/modules/training.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Classifiers training\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport glob\nimport os\nimport pickle\nimport time\n\nimport nltk\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.svm import SVC, NuSVC\n\nfrom youtube_sentiment_analysis.modules.process import ProcessData\n\n# Constants\n__all__ = ['TrainClassifier']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\nCLASSIFIERS_PATH = \"youtube_sentiment_analysis/data/classifiers/\"\n\n\nclass TrainClassifier:\n \"\"\"Training class for training and saving the classifiers\"\"\"\n\n def __init__(self, dataset_path=None, max_nr_docs=None, classifiers_names=None):\n \"\"\"Class constructor\"\"\"\n self.__classifiers_names = []\n self.__trained_classifiers = []\n\n if not classifiers_names or classifiers_names == [\"\"]:\n self.__classifiers_names = ['multinomial_naive_bayes', 
'logistic_regression', 'nu_svc']\n else:\n for classifier in classifiers_names:\n self.__classifiers_names.append(classifier)\n\n self.__dataset_path = dataset_path\n self.__max_nr_docs = max_nr_docs\n\n def set_classifiers(self, classifiers_names):\n \"\"\"Sets the classifiers to be trained\"\"\"\n\n self.__classifiers_names.clear()\n for classifier in classifiers_names:\n self.__classifiers_names.append(classifier)\n\n def train(self, progress, console):\n \"\"\"Train the classifiers with the data from nltk library\"\"\"\n\n console.append(\"> Selected classifiers: \" + str(self.__classifiers_names))\n\n progress_value = 0\n nr_classifiers = len(self.__classifiers_names)\n\n for classifier_name in self.__classifiers_names:\n console.append(\"> Training the classifier: \" + classifier_name)\n # get the starting time\n classifier_start_time = time.time()\n\n train_set, test_set = self.get_dataset_split()\n\n if classifier_name == 'multinomial_naive_bayes':\n cl_name = \"Multinomial NB classifier\"\n classifier = SklearnClassifier(MultinomialNB(alpha=1))\n\n elif classifier_name == 'bernoulli_naive_bayes':\n cl_name = \"Bernoulli NB Classifier\"\n classifier = SklearnClassifier(BernoulliNB(alpha=1, binarize=0))\n\n elif classifier_name == 'logistic_regression':\n cl_name = \"Logistic Regression\"\n classifier = SklearnClassifier(LogisticRegression(penalty='l2', class_weight='balanced', solver='saga',\n max_iter=2000, n_jobs=-1, warm_start=True))\n\n elif classifier_name == 'svc':\n cl_name = \"SVC\"\n classifier = SklearnClassifier(SVC(kernel='linear', probability=True, gamma='scale',\n class_weight='balanced', max_iter=2000, cache_size=300))\n\n elif classifier_name == 'nu_svc':\n cl_name = \"Nu SVC\"\n classifier = SklearnClassifier(NuSVC(nu=0.5, kernel='linear', probability=True, gamma='scale',\n max_iter=2000, cache_size=300, class_weight='balanced'))\n\n else:\n console.append(\"> Invalid classifier name\")\n return\n\n classifier.train(train_set)\n console.append(\"> \" + cl_name + \" accuracy percent: \" +\n str((nltk.classify.accuracy(classifier, test_set)) * 100) + \"%\")\n\n self.__save_classifier(classifier_name, classifier)\n self.__trained_classifiers.append(classifier)\n\n progress_value += 100/nr_classifiers\n progress.setValue(progress_value)\n\n # get the ending time and calculate elapsed time\n classifier_end_time = time.time()\n classifier_elapsed_time = classifier_end_time - classifier_start_time\n console.append(\"> Training \" + classifier_name + \" finished in \" +\n time.strftime(\"%H:%M:%S\", time.gmtime(classifier_elapsed_time)) + \" seconds\")\n\n def get_classifiers(self, progress=None, console=None):\n \"\"\"Returns the trained classifiers or trains them\"\"\"\n\n classifiers = []\n\n read_directory = os.listdir(CLASSIFIERS_PATH)\n\n if len(read_directory) == 0:\n console.append(\"> Training the classifiers: \")\n self.train(progress, console)\n classifiers = self.get_trained_classifiers()\n else:\n console.append(\"> Getting the trained classifiers: \")\n file_nr = 1\n for f in read_directory:\n console.append(\" \" + str(file_nr) + \". \" + f)\n file_nr = file_nr + 1\n classifiers.append(self.open_classifier(f))\n console.append(\" \" + str(file_nr) + \". vader classifier\")\n console.append(\" \" + str(file_nr + 1) + \". 
anew classifier\")\n\n return classifiers\n\n def get_trained_classifiers(self):\n \"\"\"Returns a list with trained classifiers objects\"\"\"\n\n return self.__trained_classifiers\n\n @staticmethod\n def __save_classifier(_name, _classifier):\n \"\"\"Save in file to avoid training the data again\"\"\"\n\n save_document = open(CLASSIFIERS_PATH + _name + \".pickle\", 'wb')\n pickle.dump(_classifier, save_document)\n save_document.close()\n\n @staticmethod\n def open_classifier(name):\n \"\"\"Open the trained classifier with the data from nltk library\"\"\"\n\n open_file = open(CLASSIFIERS_PATH + name, 'rb')\n classifier = pickle.load(open_file, encoding='bytes')\n open_file.close()\n\n return classifier\n\n def get_dataset_split(self):\n \"\"\"Get dataset from files (negative and positive words)\n 25000 train + 25000 test (imdb)\"\"\"\n\n file_path_train_neg = glob.glob(self.__dataset_path + 'train/neg/*.txt')\n file_path_test_neg = glob.glob(self.__dataset_path + 'test/neg/*.txt')\n file_path_train_pos = glob.glob(self.__dataset_path + 'train/pos/*.txt')\n file_path_test_pos = glob.glob(self.__dataset_path + 'test/pos/*.txt')\n\n neg_train_ids = []\n pos_train_ids = []\n neg_test_ids = []\n pos_test_ids = []\n\n pd = ProcessData()\n\n max_docs = self.__max_nr_docs / 4\n\n # train data\n nr_docs = 0\n for fp in file_path_train_neg:\n with open(fp, 'r') as f:\n if nr_docs < max_docs or self.__max_nr_docs is -1:\n neg_train_ids = neg_train_ids + [(pd.get_word_feature(f.read().split()), 'neg')]\n nr_docs = nr_docs + 1\n nr_docs = 0\n for fp in file_path_train_pos:\n with open(fp, 'r') as f:\n if nr_docs < max_docs or self.__max_nr_docs is -1:\n pos_train_ids = pos_train_ids + [(pd.get_word_feature(f.read().split()), 'pos')]\n nr_docs = nr_docs + 1\n\n # test data\n nr_docs = 0\n for fp in file_path_test_neg:\n with open(fp, 'r') as f:\n if nr_docs < max_docs or self.__max_nr_docs is -1:\n neg_test_ids = neg_test_ids + [(pd.get_word_feature(f.read().split()), 'neg')]\n nr_docs = nr_docs + 1\n nr_docs = 0\n for fp in file_path_test_pos:\n with open(fp, 'r') as f:\n if nr_docs < max_docs / 4 or self.__max_nr_docs is -1:\n pos_test_ids = pos_test_ids + [(pd.get_word_feature(f.read().split()), 'pos')]\n nr_docs = nr_docs + 1\n\n # concatenate data\n train_set = neg_train_ids + pos_train_ids\n test_set = neg_test_ids + pos_test_ids\n\n return train_set, test_set\n\n def get_dataset_labeled(self):\n \"\"\"Get dataset from files (negative and positive words)\n 25000 train + 25000 test (imdb) with labels\"\"\"\n\n # files path\n file_path_neg = glob.glob(self.__dataset_path + 'train/neg/*.txt') + \\\n glob.glob(self.__dataset_path + 'test/neg/*.txt')\n file_path_pos = glob.glob(self.__dataset_path + 'train/pos/*.txt') + \\\n glob.glob(self.__dataset_path + 'test/pos/*.txt')\n\n text_data = []\n label_values = []\n\n max_docs = self.__max_nr_docs / 2\n\n # negative comments\n nr_docs = 0\n for fp in file_path_neg:\n with open(fp, 'r') as f:\n if nr_docs < max_docs or self.__max_nr_docs is -1:\n text_data.append(f.read())\n label_values.append(-1)\n nr_docs = nr_docs + 1\n\n # positive comments\n nr_docs = 0\n for fp in file_path_pos:\n with open(fp, 'r') as f:\n if nr_docs < max_docs or self.__max_nr_docs is -1:\n text_data.append(f.read())\n label_values.append(1)\n nr_docs = nr_docs + 1\n\n return text_data, label_values\n" }, { "alpha_fraction": 0.5734297037124634, "alphanum_fraction": 0.5860822200775146, "avg_line_length": 22.542552947998047, "blob_id": "f87424bdcd853ba8227a3aa030829c2f38ac7c23", 
"content_id": "a67e4f74385d7431f45463967317558c978253c6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6639, "license_type": "permissive", "max_line_length": 77, "num_lines": 282, "path": "/youtube_sentiment_analysis/modules/sentiment_module/sentiment.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n#- SENTIMENT.PY ------------------------------------------------------------#\n# Routines to calulate average valence and arousal for one or more terms\t#\n# using the ANEW and Happiness sentiment dictionaries\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n#- Modification History: ---------------------------------------------------#\n# When:\t\tWho:\t\t\t\t\tComments:\t\t\t\t\t\t\t#\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#\n# 28-Sep-14\tChristopher G. Healey\tConverted from Javascript\t\t\t#\n# 17-Dec-17\tChristopher G. Healey\tChanged to SENTIMENT.PY to clarify #\n#\t\t\t\t\t\t\t\t\t\tthese are not ANEW-only terms\t\t#\n#---------------------------------------------------------------------------#\n\nimport math\nimport nltk\n\n# Import raw ANEW and Happiness dictionary data from term file\n\nfrom .sentiment_term import anew_word as anew_word\nfrom .sentiment_term import anew_stem as anew_stem\nfrom .sentiment_term import hapi_word as hapi_word\n\n__all__ = ['add_term', 'sentiment', 'exist']\n\n# Setup a \"custom\" dictionary to allow users to extend the ANEW and\n# happiness dictionaries\n\ncust_dict = { }\t\t\t\t\t\t\t# Custom dictionary, raw terms\ncust_stem = { }\t\t\t\t\t\t\t# Custom dictionary, stemmed terms\n\n\ndef add_term( term, v, a, replace = False ):\n\n# Add a term to the custom dictionary; if it already exists one of\n# the default dictionaries, the request will be ignored unless the\n# user explicitly asks for the value to be changed\n#\n# term: Term to add\n# v: Valence\n# a: Arousal\n# replace: Replace term that exists in default dictionaries\n\n\tglobal cust_dict\n\tglobal cust_stem\n\n\n\t# If term already exists and user does not ask to replace it, stop\n\n\tif exist( term ) and replace != True:\n\t\treturn\n\n\t# Otherwise either replace it or add it to the custom dictionary\n\n\tif term in anew_word and replace == True:\n\t\tanew_word[ term ][ 'avg' ][ 0 ] = a\n\t\tanew_word[ term ][ 'avg' ][ 1 ] = v\n\telif term in anew_stem and replace == True:\n\t\tanew_stem[ term ][ 'avg' ][ 0 ] = a\n\t\tanew_stem[ term ][ 'std' ][ 1 ] = v\n\telif term in hapi_word:\n\t\thapi_word[ term ][ 'avg' ][ 0 ] = a\n\t\thapi_word[ term ][ 'std' ][ 1 ] = v\n\telse:\n\t\tcust_dict[ term ] = { }\n\t\tcust_dict[ term ][ 'dict' ] = \"custom\"\n\t\tcust_dict[ term ][ 'word' ] = term\n\t\tcust_dict[ term ][ 'avg' ] = [ a, v ]\n\t\tcust_dict[ term ][ 'std' ] = [ 1, 1 ]\n\t\tcust_dict[ term ][ 'fq' ] = 1\n\n\t\t# Build a stem for the custom term\n\n\t\tporter = nltk.stem.porter.PorterStemmer()\n\t\tstem = porter.stem( term )\n\n\t\tcust_dict[ term ][ 'stem' ] = porter.stem( term )\n\n\t\t# Add term to custom stem dictionary with stem as key\n\n\t\tcust_stem[ stem ] = { }\n\t\tcust_stem[ stem ][ 'dict' ] = \"custom\"\n\t\tcust_stem[ stem ][ 'word' ] = stem\n\t\tcust_stem[ stem ][ 'stem' ] = stem\n\t\tcust_stem[ stem ][ 'avg' ] = [ a, v ]\n\t\tcust_stem[ stem ][ 'std' ] = [ 1, 1 ]\n\t\tcust_stem[ stem ][ 'fq' ] = 1\n\n# End function add_term\n\n\ndef arousal( term ):\n\n# Return the average arousal for a term\n#\n# term: Term to check (can be string or list of 
strings)\n\n\tif isinstance( term, str ):\n\t\treturn arousal_raw( term )[ 0 ]\n\n\telif not isinstance( term, list ):\n\t\treturn 0.0\n\n\t# At this point we know we're working with a list of terms\n\n\tc = 2.0 * math.pi\n\tprob = [ ]\n\tprob_sum = 0.0\n\ta_mu = [ ]\n\n\tfor t in term:\n\t\tif exist( t ):\n\t\t\ta = arousal_raw( t )\n\n\t\t\tp = 1.0 / math.sqrt( c * math.pow( a[ 1 ], 2.0 ) )\n\t\t\tprob.append( p )\n\t\t\tprob_sum = prob_sum + p\n\t\t\n\t\t\ta_mu.append( a[ 0 ] )\n\n\tarousal = 0.0\n\tfor i in range( 0, len( a_mu ) ):\n\t\tarousal = arousal + ( prob[ i ] / prob_sum * a_mu[ i ] )\n\n\treturn arousal\n\n# End function arousal\n\n\ndef arousal_raw( term ):\n\n# Return the raw arousal for a single term\n#\n# term: Term to check\n\n\tglobal cust_dict\n\tglobal cust_stem\n\n\n\tif not exist( term ):\n\t\tavg = 0.0\n\t\tstd = 0.0\n\telif term in anew_word:\n\t\tavg = anew_word[ term ][ 'avg' ][ 1 ]\n\t\tstd = anew_word[ term ][ 'std' ][ 1 ]\n\telif term in anew_stem:\n\t\tavg = anew_stem[ term ][ 'avg' ][ 1 ]\n\t\tstd = anew_stem[ term ][ 'std' ][ 1 ]\n\telif term in cust_dict:\n\t\tavg = cust_dict[ term ][ 'avg' ][ 1 ]\n\t\tstd = cust_dict[ term ][ 'std' ][ 1 ]\n\telif term in cust_stem:\n\t\tavg = cust_stem[ term ][ 'avg' ][ 1 ]\n\t\tstd = cust_stem[ term ][ 'std' ][ 1 ]\n\telse:\n\t\tavg = hapi_word[ term ][ 'avg' ][ 1 ]\n\t\tstd = hapi_word[ term ][ 'std' ][ 1 ]\n\n\treturn [ avg, std ]\n\n# End function arousal_raw\n\n\ndef exist( term ):\n\n# Return True if a term exists in one of the sentiment dictionaries,\n# False otherwise\n#\n# term: Term to check (can be string or list of strings)\n\n\tglobal cust_dict\n\tglobal cust_stem\n\n\n\tif isinstance( term, str ):\n\t\tex = term in anew_word or term in anew_stem or\\\n\t\t term in hapi_word or term in cust_dict or term in cust_stem\n\t\treturn ex\n\n\telif isinstance( term, list ):\n\t\tterm_list = [ ]\n\n\t\tfor t in term:\n\t\t\tex = t in anew_word or t in anew_stem or\\\n\t\t\t t in hapi_word or t in cust_dict or t in cust_stem\n\t\t\tterm_list.append( ex )\n\n\t\treturn term_list\n\n\telse:\n\t\treturn False\n\n# End function exist\n\n\ndef sentiment( term ):\n\n# Return the valence and arousal sentiment for a term\n#\n# term: Term to check (can be string or list of strings)\n\n\tsen = { 'valence': 0.0, 'arousal': 0.0 }\n\n\tif isinstance( term, str ) or isinstance( term, list ):\n\t\tsen[ 'valence' ] = valence( term )\n\t\tsen[ 'arousal' ] = arousal( term )\n\n\treturn sen\n\n# End function sentiment\n\n\ndef valence( term ):\n\n# Return the average valence for a term\n#\n# term: Term to check (can be string or list of strings)\n\n\tif isinstance( term, str ):\n\t\treturn valence_raw( term )[ 0 ]\n\n\telif not isinstance( term, list ):\n\t\treturn 0.0\n\n\t# At this point we know we're working with a list of terms\n\n\tc = 2.0 * math.pi\n\tprob = [ ]\n\tprob_sum = 0.0\n\tv_mu = [ ]\n\n\tfor t in term:\n\t\tif exist( t ):\n\t\t\tv = valence_raw( t )\n\n\t\t\tp = 1.0 / math.sqrt( c * math.pow( v[ 1 ], 2.0 ) )\n\t\t\tprob.append( p )\n\t\t\tprob_sum = prob_sum + p\n\t\t\n\t\t\tv_mu.append( v[ 0 ] )\n\n\tval = 0.0\n\tfor i in range( 0, len( v_mu ) ):\n\t\tval = val + ( prob[ i ] / prob_sum * v_mu[ i ] )\n\n\treturn val\n\n# End function valence\n\n\ndef valence_raw( term ):\n\n# Return the raw valence for a single term\n#\n# term: Term to check\n\n\tglobal cust_dict\n\tglobal cust_stem\n\n\n\tif not exist( term ):\n\t\tavg = 0.0\n\t\tstd = 0.0\n\telif term in anew_word:\n\t\tavg = anew_word[ term ][ 'avg' ][ 0 ]\n\t\tstd = 
anew_word[ term ][ 'std' ][ 0 ]\n\telif term in anew_stem:\n\t\tavg = anew_stem[ term ][ 'avg' ][ 0 ]\n\t\tstd = anew_stem[ term ][ 'std' ][ 0 ]\n\telif term in cust_dict:\n\t\tavg = cust_dict[ term ][ 'avg' ][ 0 ]\n\t\tstd = cust_dict[ term ][ 'std' ][ 0 ]\n\telif term in cust_stem:\n\t\tavg = cust_stem[ term ][ 'avg' ][ 0 ]\n\t\tstd = cust_stem[ term ][ 'std' ][ 0 ]\n\telse:\n\t\tavg = hapi_word[ term ][ 'avg' ][ 0 ]\n\t\tstd = hapi_word[ term ][ 'std' ][ 0 ]\n\n\treturn [ avg, std ]\n\n# End function valence_raw\n" }, { "alpha_fraction": 0.5937548875808716, "alphanum_fraction": 0.5979915261268616, "avg_line_length": 36.046512603759766, "blob_id": "253f36726e8538837c9112e0cff3e6b1513a8385", "content_id": "f9ef7f125c20a6f1feb4aadda792e9f98f636f81", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6373, "license_type": "permissive", "max_line_length": 114, "num_lines": 172, "path": "/youtube_sentiment_analysis/modules/accuracy.py", "repo_name": "alexgrigoras/youtube_consumer_perception", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n\"\"\"\n Classifiers accuracy\n @alexandru_grigoras\n\"\"\"\n\n# Libraries\nimport time\n\nimport numpy as np\nfrom nltk.sentiment import SentimentIntensityAnalyzer\nfrom sklearn import model_selection\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.svm import NuSVC\n\nfrom youtube_sentiment_analysis.modules.process import ProcessData\nfrom youtube_sentiment_analysis.modules.sentiment_module import sentiment as anew\nfrom youtube_sentiment_analysis.modules.training import TrainClassifier\nfrom youtube_sentiment_analysis.modules.vote_classifier import VoteClassifier\n\n# Constants\n\n__all__ = ['TestAccuracy']\n__version__ = '1.0'\n__author__ = 'Alexandru Grigoras'\n__email__ = '[email protected]'\n__status__ = 'release'\n\nCLASSIFIERS_PATH = \"youtube_sentiment_analysis/data/classifiers_large_dataset/\"\n\n\nclass TestAccuracy:\n \"\"\"Class for testing the accuracy of the algorithms\"\"\"\n\n def __init__(self, dataset_path, max_nr_docs):\n \"\"\"Class constructor\"\"\"\n self.__dataset_path = dataset_path\n self.__max_nr_docs = max_nr_docs\n\n def test_cross_val_score(self, k_fold, progress, console):\n \"\"\"Testing Classifiers Accuracy using Cross-Validation Method\"\"\"\n\n # get the starting time\n start_time = time.time()\n\n tc = TrainClassifier(self.__dataset_path, self.__max_nr_docs)\n\n text_data, label_values = tc.get_dataset_labeled()\n\n x_elements = np.array(text_data)\n y_elements = np.array(label_values)\n\n sid = SentimentIntensityAnalyzer()\n\n vader_accuracy = []\n anew_accuracy = []\n voting_accuracy = []\n\n process_data = ProcessData()\n\n progress_value = 0\n\n # machine learning classifiers\n classifiers = tc.get_classifiers(progress, console)\n voted_classifier = VoteClassifier(classifiers)\n\n kf = KFold(n_splits=k_fold, random_state=None, shuffle=False)\n for train_index, test_index in kf.split(x_elements):\n x_train, x_test = x_elements[train_index], x_elements[test_index]\n y_train, y_test = y_elements[train_index], y_elements[test_index]\n\n test_values_vader = []\n test_values_anew = []\n test_values_voting = []\n predicted_values = []\n\n for text, value in zip(x_test, y_test):\n 
process_data.process_text(text)\n\n ss = sid.polarity_scores(text)\n\n if ss[\"compound\"] >= 0:\n test_values_vader.append(\"positive\")\n else:\n test_values_vader.append(\"negative\")\n\n tokens = process_data.get_tokens()\n\n if anew.sentiment(tokens)['valence'] >= 5.8:\n test_values_anew.append(\"positive\")\n else:\n test_values_anew.append(\"negative\")\n\n if value == -1:\n predicted_values.append(\"negative\")\n else:\n predicted_values.append(\"positive\")\n\n # machine learning algorithms sentiment value\n ml_algorithms_sentiment = voted_classifier.classify(text, process_data)\n\n if ml_algorithms_sentiment >= 0:\n test_values_voting.append(\"positive\")\n else:\n test_values_voting.append(\"negative\")\n\n acc_vader = accuracy_score(test_values_vader, predicted_values, normalize=True)\n acc_anew = accuracy_score(test_values_anew, predicted_values, normalize=True)\n acc_voting = accuracy_score(test_values_voting, predicted_values, normalize=True)\n\n vader_accuracy.append(acc_vader)\n anew_accuracy.append(acc_anew)\n voting_accuracy.append(acc_voting)\n\n progress_value += 40 / k_fold\n progress.setValue(progress_value)\n\n vader_accuracy_array = np.array(vader_accuracy)\n anew_accuracy_array = np.array(anew_accuracy)\n voting_accuracy_array = np.array(voting_accuracy)\n\n console.append(\"> %s: %f (%f)\" % (\"VADER\", vader_accuracy_array.mean(), vader_accuracy_array.std()))\n console.append(\"> %s: %f (%f)\" % (\"ANEW\", anew_accuracy_array.mean(), anew_accuracy_array.std()))\n console.append(\"> %s: %f (%f)\" % (\"VOTING\", voting_accuracy_array.mean(), voting_accuracy_array.std()))\n\n # prepare configuration for cross validation test harness\n models = [('NuSVC', NuSVC(nu=0.5, kernel='linear', probability=True, gamma='scale', cache_size=500,\n class_weight='balanced')),\n ('LR', LogisticRegression(penalty='l2', class_weight='balanced', solver='saga', max_iter=5000,\n n_jobs=-1, warm_start=True)),\n ('MNB', MultinomialNB(alpha=1))]\n\n # evaluate each model in turn\n results = []\n names = []\n show_info = 0\n\n # add the VADER and ANEW classifiers\n results.append(voting_accuracy)\n names.append(\"VOTING\")\n\n for name, model in models:\n tf_idf = TfidfVectorizer()\n classifier = make_pipeline(tf_idf, model)\n cv_results = model_selection.cross_val_score(classifier, x_elements, y_elements,\n cv=k_fold, scoring='accuracy', n_jobs=-1,\n verbose=show_info)\n results.append(cv_results)\n names.append(name)\n console.append(\"> %s: %f (%f)\" % (name, cv_results.mean(), cv_results.std()))\n\n progress_value += 20\n progress.setValue(progress_value)\n\n # add vader and anew classifiers\n results.append(vader_accuracy)\n names.append(\"VADER\")\n results.append(anew_accuracy)\n names.append(\"ANEW\")\n\n # get the ending time and calculate elapsed time\n end_time = time.time()\n elapsed_time = end_time - start_time\n console.append(\"> Data processed in \" + time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)) + \" seconds\")\n\n return results, names\n\n" } ]
13
grant23/website
https://github.com/grant23/website
7f707d19a01cce1559188c24b9484bcdf3b85886
14115e1bef4015ef933187f9a16d54d1186dbbe3
535c54718f1993c8a03bb553f1355c3600cc620b
refs/heads/master
2021-01-16T01:02:00.698669
2015-06-11T08:24:57
2015-06-11T08:24:57
37,224,543
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6001026630401611, "alphanum_fraction": 0.604722797870636, "avg_line_length": 33.175437927246094, "blob_id": "6fde89c2506b55508cc88669384e8d9c444a7273", "content_id": "06b44cfdf6cb8b6d98c9a6d9196f2753443af0ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1948, "license_type": "no_license", "max_line_length": 89, "num_lines": 57, "path": "/info.py", "repo_name": "grant23/website", "src_encoding": "UTF-8", "text": "import os, pprint, sqlite3\nfrom collections import namedtuple\n\ndef open_database(path='info.db'):\n new = not os.path.exists(path)\n db = sqlite3.connect(path)\n if new:\n c = db.cursor()\n c.execute( 'CREATE TABLE information( id INTEGER PRIMARY KEY,'\n 'account TEXT, password TEXT, age INTEGER, introduction TEXT )' )\n add_information( db, 'aaa', 'aaa', '20', 'I am aaa.' )\n add_information( db, 'bbb', 'bbb', '25', 'I am bbb.' )\n db.commit()\n return db\n\ndef add_information( db, account, password, age, introduction ):\n db.cursor().execute( 'INSERT INTO information ( account, password, age, introduction )'\n ' VALUES (?, ?, ?, ?)', ( account, password, age, introduction ) )\n\ndef get_information( db, account ):\n c = db.cursor()\n c.execute( 'SELECT * FROM information WHERE account = ?', ( account, ) )\n Row = namedtuple( 'Row', [tup[0] for tup in c.description] )\n return [Row(*row) for row in c.fetchall()]\n\ndef update_information( db, account, password = '', age = '', introduction = '' ):\n c = db.cursor()\n\n if password:\n c.execute( 'UPDATE information SET password = ?'\n ' WHERE account = ?', ( password, account ) )\n\n if age:\n c.execute( 'UPDATE information SET age = ?'\n ' WHERE account = ?', ( age, account ) )\n\n if introduction:\n c.execute( 'UPDATE information SET introduction = ?'\n ' WHERE account = ?', ( introduction, account ) )\n\n\ndef has_account( db, account, password ):\n c = db.cursor()\n c.execute( 'SELECT * FROM information WHERE account =?'\n ' AND password = ?', ( account, password ) )\n Row = namedtuple( 'Row', [tup[0] for tup in c.description] )\n result = [Row(*row) for row in c.fetchall()]\n\n if len( result ) == 0:\n return False\n else:\n return True\n\nif __name__ == '__main__':\n db = open_database()\n print( get_information( db, 'aaa' ))\n pprint.pprint(get_information( db, 'aaa' ))\n" } ]
1
Yeming945/DjangoCMDB
https://github.com/Yeming945/DjangoCMDB
9540e1e5df11125a5c6f6b50be5ece1e3b09f71b
569d8efb7ec85f495330bcff9fb60a2fa40a370d
a64a9a5e6245871a31b25536cf5aeb38c0670fda
refs/heads/master
2023-04-27T05:00:16.119918
2020-01-15T05:28:59
2020-01-15T05:28:59
232,456,353
0
0
null
2020-01-08T02:03:56
2020-01-15T05:29:16
2023-04-21T20:45:00
Python
[ { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.75, "avg_line_length": 21, "blob_id": "b35241f2cf70fc1e652326be503f960bf6c84068", "content_id": "ba05686522736b469735353c2ad6bc8a50d22ca0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 190, "license_type": "no_license", "max_line_length": 54, "num_lines": 6, "path": "/README.md", "repo_name": "Yeming945/DjangoCMDB", "src_encoding": "UTF-8", "text": "# 这是一个跟随大刘博客做的Django CMDB系统\n[大刘链接](https://www.liujiangblog.com/course/django/116)\n\n##### 测试数据发送(增加新资产)\n\npython main.py report_data\n" }, { "alpha_fraction": 0.758152186870575, "alphanum_fraction": 0.758152186870575, "avg_line_length": 22, "blob_id": "7eb1129db377f44228034494beb20bf7030393cf", "content_id": "8f0bf76530aa34731b1c28d0a827cb946c8f3353", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/Client/bin/main.py", "repo_name": "Yeming945/DjangoCMDB", "src_encoding": "UTF-8", "text": "\"\"\" 把客户端信息收集脚本做成windows和linux两个不同的版本\n通过 os 和 sys 模块的配合,将当前客户端所在目录设置为工作目录,如果不这么做,会无法导入其它模块;\nhandler 模块是核心代码模块,在 core 目录中,我们一会来实现它。\n以后调用客户端就只需要执行python main.py 参数就可以了\n\"\"\"\n\nimport os\nimport sys\n\nBASE_DIR = os.path.dirname(os.getcwd())\n# 设置工作目录, 使得包和模块能够正常导入\nsys.path.append(BASE_DIR)\n\nfrom core import handler\nif __name__ == '__main__':\n handler.ArgvHandler(sys.argv)\n" }, { "alpha_fraction": 0.4967948794364929, "alphanum_fraction": 0.692307710647583, "avg_line_length": 16.33333396911621, "blob_id": "489441cf19f5d1b9685bdc0913580e1d6103140b", "content_id": "c7698120062cdf66650a591c9efca21565923132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 312, "license_type": "no_license", "max_line_length": 29, "num_lines": 18, "path": "/requirements.txt", "repo_name": "Yeming945/DjangoCMDB", "src_encoding": "UTF-8", "text": "astroid==2.2.5\nautopep8==1.4.4\ncolorama==0.4.1\nDjango==2.2.8\ndjango-ranged-response==0.2.0\ndjango-simple-captcha==0.5.12\nisort==4.3.21\nlazy-object-proxy==1.4.2\nmccabe==0.6.1\nPillow>=6.2.0\npycodestyle==2.5.0\npylint==2.3.1\npytz==2019.2\nsix==1.12.0\nsqlparse==0.3.0\ntyped-ast==1.4.0\nwrapt==1.11.2\nmysqlclient==1.4.6\n" }, { "alpha_fraction": 0.47043442726135254, "alphanum_fraction": 0.4786700904369354, "avg_line_length": 36.62131118774414, "blob_id": "50e905a40f1e6a4787fd6a1d226eddc6ea3eb25e", "content_id": "b95bcae5d544346d25fd68bf971709f3ce8d25a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 26031, "license_type": "no_license", "max_line_length": 88, "num_lines": 610, "path": "/assets/models.py", "repo_name": "Yeming945/DjangoCMDB", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nfrom django.contrib.auth.models import User\n\"\"\" 导入 django.contrib.auto.models 内置的 User 表,作为我们 CMDB 项目的用户表,用于保存管理员和批准人员的信息;\"\"\"\n\n\nclass Asset(models.Model):\n \"\"\" 所有资产的共有数据表\n sn 这个数据字段是所有资产都必须有,并且唯一不可重复的!通常来自自动收集的数据中;\n name 和 sn 一样,也是唯一的;\n asset_type_choice 和 asset_status 分别设计为两个选择类型\n adamin 和 approved_by 是分别是当前资产的管理员和将该资产上线的审批员,为了区分他们,设置了 related_name;\n Asset 表中的很多字段内容都无法自动获取,需要我们手动输入,比如合同、备注。\n 最关键的是其中的一些外键字段,设置为on_delete=models.SET_NULL,这样的话,当关联的对象被删除的时候,不会影响到资产数据表。\n \"\"\"\n\n asset_type_choice = (\n ('server', '服务器'),\n ('networddevice', '网络设备'),\n ('storagedevice', '存储设备'),\n 
('securitydevice', '安全设备'),\n ('software', '软件资产'),\n )\n\n asset_status = (\n (0, '在线'),\n (1, '下线'),\n (2, '未知'),\n (3, '故障'),\n (4, '备用'),\n )\n asset_type = models.CharField(choices=asset_type_choice,\n max_length=64,\n default='server',\n verbose_name='资产类型')\n # unique=True时,在整个数据表内该字段的数据不可重复\n name = models.CharField(max_length=64, unique=True, verbose_name='资产名称')\n sn = models.CharField(max_length=128, unique=True, verbose_name='资产序列号')\n # 将外键字段设为null\n business_unit = models.ForeignKey('BusinessUnit',\n null=True,\n blank=True,\n verbose_name='所属业务线',\n on_delete=models.SET_NULL)\n status = models.SmallIntegerField(choices=asset_status,\n default=0,\n verbose_name='设备状态')\n manufacturer = models.ForeignKey('ManuFacturer',\n null=True,\n blank=True,\n verbose_name='制造商',\n on_delete=models.SET_NULL)\n manage_ip = models.GenericIPAddressField(null=True,\n blank=True,\n verbose_name='管理IP')\n tags = models.ManyToManyField('Tag', blank=True, verbose_name='标签')\n # related_name 用于关联对象反向引用模型的名称\n admin = models.ForeignKey(User,\n null=True,\n blank=True,\n verbose_name='资产管理员',\n related_name='admin',\n on_delete=models.SET_NULL)\n idc = models.ForeignKey('IDC',\n null=True,\n blank=True,\n verbose_name='所在机房',\n on_delete=models.SET_NULL)\n contract = models.ForeignKey('Contract',\n null=True,\n blank=True,\n verbose_name='合同',\n on_delete=models.SET_NULL)\n\n purchase_day = models.DateField(null=True, blank=True, verbose_name='购买日期')\n expire_day = models.DateField(null=True, blank=True, verbose_name='过保日期')\n price = models.FloatField(null=True, blank=True, verbose_name='购买价格')\n\n approved_by = models.ForeignKey(User,\n null=True,\n blank=True,\n verbose_name='批准人',\n related_name='approved_by',\n on_delete=models.SET_NULL)\n\n memo = models.TextField(null=True, blank=True, verbose_name='备注')\n c_time = models.DateTimeField(auto_now_add=True, verbose_name='批准日期')\n m_time = models.DateTimeField(auto_now=True, verbose_name='更新日期')\n\n # 当print输出实例对象或str() 实例对象时,调用这个方法\n def __str__(self):\n return '<%s> %s' % (self.get_asset_type_display(), self.name)\n\n class Meta:\n verbose_name = '资产总表' # 设置模型对象的直观、人类可读的名称\n verbose_name_plural = verbose_name\n ordering = ['-c_time'] # 指定该模型生成的所有对象的排序方式\n\n\nclass Server(models.Model):\n \"\"\" 服务器设备\n 每台服务器都唯一关联着一个资产对象,因此使用 OneToOneField 构建了一个一对一字段,这非常重要!\n 服务器又可分为几种子类型,这里定义了三种;\n 服务器添加的方式可以分为手动和自动;\n 有些服务器是虚拟机或者 docker 生成的,没有物理实体,存在于宿主机中,因此需要增加一个 hosted_on 字段;这里认为,宿主机如果被删除,虚拟机也就不存在了;\n 服务器有型号信息,如果硬件信息中不包含,那么指的就是主板型号;\n Raid 类型在采用了 Raid 的时候才有,否则为空\n 操作系统相关信息包含类型、发行版本和具体版本\n \"\"\"\n\n sub_asset_type_choice = (\n (0, 'PC服务器'),\n (1, '刀片机'),\n (2, '小型机'),\n )\n created_by_choice = (\n ('auto', '自动添加'),\n ('manual', '手工添加'),\n )\n\n # 非常关键的一对一关联!asset被删除的时候一并删除server\n asset = models.OneToOneField('Asset', on_delete=models.CASCADE)\n sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice,\n default=0,\n verbose_name='服务器类型')\n created_by = models.CharField(choices=created_by_choice,\n max_length=32,\n default='auto',\n verbose_name='添加方式')\n hosted_on = models.ForeignKey('self',\n related_name='hosted_on_server',\n blank=True,\n null=True,\n verbose_name='宿主机',\n on_delete=models.CASCADE) # 虚拟机专用字段\n model = models.CharField(max_length=128,\n null=True,\n blank=True,\n verbose_name='服务器型号')\n raid_type = models.CharField(max_length=512,\n null=True,\n blank=True,\n verbose_name='Raid类型')\n\n os_type = models.CharField(max_length=64,\n null=True,\n blank=True,\n verbose_name='操作系统类型')\n os_distribution = 
models.CharField(max_length=64,\n null=True,\n blank=True,\n verbose_name='发行商')\n os_release = models.CharField(max_length=64,\n null=True,\n blank=True,\n verbose_name='操作系统版本')\n\n def __str__(self):\n return '%s--%s--%s <sn:%s>' % (self.asset.name,\n self.get_sub_asset_type_dispaly(),\n self.model, self.asset.sn)\n\n class Meta:\n verbose_name = '服务器'\n verbose_name_plural = verbose_name\n\n\nclass SecurityDevice(models.Model):\n \"\"\" 安全设备 \"\"\"\n\n sub_asset_type_choice = (\n (0, '防火墙'),\n (1, '入侵检测设备'),\n (2, '互联网网关'),\n (4, '运维审计系统'),\n )\n asset = models.OneToOneField('Asset', on_delete=models.CASCADE)\n sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice,\n default=0,\n verbose_name='安全设备类型')\n model = models.CharField(max_length=128,\n default='未知型号',\n verbose_name='安全设备型号')\n\n def __str__(self):\n return self.asset.name + '--' + self.get_sub_asset_type_dispaly(\n ) + str(self.model) + \" id:%s \" % self.id\n\n class Meta:\n verbose_name = '安全设备'\n verbose_name_plural = verbose_name\n\n\nclass StorageDevice(models.Model):\n \"\"\" 存储设备 \"\"\"\n sub_asset_type_choice = (\n (0, '磁盘阵列'),\n (1, '网络存储器'),\n (2, '磁带库'),\n (4, '磁带机'),\n )\n\n asset = models.OneToOneField('Asset', on_delete=models.CASCADE)\n sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice,\n default=0,\n verbose_name='存储设备类型')\n model = models.CharField(max_length=128,\n default='未知型号',\n verbose_name='存储设备型号')\n\n def __str__(self):\n return self.asset.name + '--' + self.get_sub_asset_type_dispaly(\n ) + str(self.model) + 'id:%s' % self.id\n\n class Meta:\n verbose_name = '存储设备'\n verbose_name_plural = verbose_name\n\n\nclass NetworkDevice(models.Model):\n \"\"\" 网络设备 \"\"\"\n sub_asset_type_choice = (\n (0, '路由器'),\n (1, '交换机'),\n (2, '负载均衡'),\n (4, 'VPN设备'),\n )\n\n asset = models.OneToOneField('Asset', on_delete=models.CASCADE)\n sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice,\n default=0,\n verbose_name='网络设备类型')\n model = models.CharField(max_length=128,\n default='未知型号',\n verbose_name='网络设备型号')\n\n vlan_ip = models.GenericIPAddressField(blank=True,\n null=True,\n verbose_name='VlanIP')\n intranet_ip = models.GenericIPAddressField(blank=True,\n null=True,\n verbose_name='内网IP')\n\n firmware = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='设备固件版本')\n port_num = models.SmallIntegerField(null=True,\n blank=True,\n verbose_name='端口个数')\n device_detail = models.TextField(null=True,\n blank=True,\n verbose_name='详细配置')\n\n def __str__(self):\n return '%s--%s--%s <sn:%s>' % (self.asset.name,\n self.get_sub_asset_type_dispaly(),\n self.modelm, self.asset.sn)\n\n class Meta:\n verbose_name = '网络设备'\n verbose_name_plural = verbose_name\n\n\nclass Software(models.Model):\n \"\"\" 只保存付费购买的软件\n 对于软件,它没有物理形体,因此无须关联一个资产对象;\n 软件只管理那些大型的收费软件,关注点是授权数量和软件版本。对于那些开源的或者免费的软件,显然不算公司的资产\n \"\"\"\n sub_asset_type_choice = (\n (0, '操作系统'),\n (1, '办公/开发软件'),\n (2, '业务软件'),\n )\n\n sub_asset_type = models.SmallIntegerField(choices=sub_asset_type_choice,\n default=0,\n verbose_name='网络设备类型')\n license_num = models.IntegerField(default=1, verbose_name='授权数量')\n version = models.CharField(max_length=64,\n unique=True,\n help_text='例如: RedHat relate 7 (Final)',\n verbose_name='软件/系统版本')\n\n def __str__(self):\n return '%s--%s' % (self.get_sub_asset_type_dispaly(), self.version())\n\n class Meta:\n verbose_name = '软件/系统'\n verbose_name_plural = verbose_name\n\n\nclass CPU(models.Model):\n \"\"\" CPU组件 \"\"\"\n asset = 
models.OneToOneField('Asset', on_delete=models.CASCADE)\n cpu_model = models.CharField(max_length=128, verbose_name='CPU型号')\n # 正整数字段,包含0,最大2147483647。\n cpu_count = models.PositiveIntegerField(default=1, verbose_name='物理CPU个数')\n # 较小的正整数字段,从0到32767\n cpu_core_count = models.PositiveSmallIntegerField(default=1,\n verbose_name='CPU核数')\n cpu_thread_count = models.PositiveSmallIntegerField(default=1,\n verbose_name='CPU线程数')\n cpu_frequency = models.DecimalField(max_digits=3,\n decimal_places=2,\n verbose_name='CPU主频(GHZ)')\n\n def __str__(self):\n return '%s: %s' % (self.asset.name, self.cpu.model)\n\n class Meta:\n verbose_name = 'CPU'\n verbose_name_plural = verbose_name\n\n\nclass RAM(models.Model):\n \"\"\" 内存组件 \"\"\"\n asset = models.ForeignKey('Asset', on_delete=models.CASCADE)\n sn = models.CharField('SN号', max_length=128, blank=True, null=True)\n model = models.CharField('内存型号', max_length=128, blank=True, null=True)\n manufacturer = models.CharField('内存制造商',\n max_length=128,\n blank=True,\n null=True)\n slot = models.CharField('插槽', max_length=64)\n capacity = models.IntegerField('内存大小(GB)', blank=True, null=True)\n frequency = models.IntegerField('内存频率(MHZ)', blank=True, null=True)\n\n def __str__(self):\n return '%s: %s :%s :%s :%s' % (self.asset.name, self.model, self.slot,\n self.capacity, self.ram_frequency)\n\n class Meta:\n verbose_name = '内存'\n verbose_name_plural = verbose_name\n # 联合主键约束\n unique_together = ('asset', 'slot') # 同一资产下的内存,根据插槽的不同,必须唯一\n\n\nclass Disk(models.Model):\n \"\"\" 存储设备 \"\"\"\n disk_interface_type_choice = (\n ('SATA', 'SATA'),\n ('SAS', 'SAS'),\n ('SCSI', 'SCSI'),\n ('M.2', 'M.2'),\n ('unknown', 'unknown'),\n )\n\n disk_protocol_choice = (\n ('SATA', 'SATA'),\n ('NVME', 'NVME'),\n ('unknown', 'unknown'),\n )\n\n asset = models.ForeignKey('Asset', on_delete=models.CASCADE)\n sn = models.CharField('硬盘SN号', max_length=128)\n slot = models.CharField('所在插槽位', max_length=64, blank=True, null=True)\n model = models.CharField('磁盘型号', max_length=64, blank=True, null=True)\n manufacturer = models.CharField('磁盘制造商',\n max_length=64,\n blank=True,\n null=True)\n capacity = models.FloatField('磁盘容量(GB)', blank=True, null=True)\n interface_type = models.CharField('接口类型',\n max_length=16,\n choices=disk_interface_type_choice,\n default='unknown')\n disk_protocol = models.CharField('磁盘协议',\n max_length=16,\n choices=disk_protocol_choice,\n default='SATA')\n\n def __str__(self):\n return '%s: %s: %s: %sGB: %s ' % (self.asset.name, self.model,\n self.slot, self.capacity)\n\n class Meta:\n verbose_name = '硬盘'\n verbose_name_plural = verbose_name\n unique_together = ('asset', 'sn')\n\n\nclass NIC(models.Model):\n \"\"\"\n 网卡组件\n 一台设备中可能有很多块网卡,所以网卡与资产是外键的关系\n \"\"\"\n asset = models.ForeignKey('Asset', on_delete=models.CASCADE) # 外键\n name = models.CharField('网卡名称', max_length=64, blank=True, null=True)\n model = models.CharField('网卡型号', max_length=64)\n mac = models.CharField('MAC地址', max_length=64)\n id_address = models.GenericIPAddressField('IP地址', blank=True, null=True)\n net_mask = models.CharField('掩码', max_length=64, blank=True, null=True)\n bonding = models.CharField('绑定地址', max_length=64, blank=True, null=True)\n manufacturer = models.CharField('网卡制造商',\n max_length=64,\n blank=True,\n null=True)\n\n def __str__(self):\n return '%s: %s: %s: %s ' % (self.asset.name, self.model, self.mac)\n\n class Meta:\n verbose_name = '网卡'\n verbose_name_plural = verbose_name\n # 资产、型号和mac必须联合唯一。防止虚拟机中的特殊情况发生错误\n unique_together = ('asset', 'model', 'mac')\n\n\nclass 
IDC(models.Model):\n \"\"\" 机房\n 机房可以有很多其它字段,比如城市、楼号、楼层和未知等等,如有需要可自行添加; \"\"\"\n name = models.CharField(max_length=64, unique=True, verbose_name='机房名称')\n memo = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='备注')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '机房'\n verbose_name_plural = verbose_name\n\n\nclass Manufacturer(models.Model):\n \"\"\" 厂商 \"\"\"\n name = models.CharField(max_length=64, unique=True, verbose_name='厂商名称')\n telephone = models.CharField(max_length=30,\n blank=True,\n null=True,\n verbose_name='支持电话')\n memo = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='备注')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '厂商'\n verbose_name_plural = verbose_name\n\n\nclass BusinessUnit(models.Model):\n \"\"\" 业务线\n 业务线可以有子业务线,因此使用一个外键关联自身模型 \"\"\"\n parent_unit = models.ForeignKey('self',\n blank=True,\n null=True,\n related_name='parent_level',\n on_delete=models.SET_NULL)\n telephone = models.CharField(max_length=30,\n blank=True,\n null=True,\n verbose_name='业务线')\n memo = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='备注')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '业务线'\n verbose_name_plural = verbose_name\n\n\nclass Contract(models.Model):\n \"\"\" 合同\n 合同模型主要存储财务部门关心的数据; \"\"\"\n sn = models.CharField(max_length=128, unique=True, verbose_name='合同号')\n name = models.CharField(max_length=64, verbose_name='合同名称')\n memo = models.TextField(blank=True, null=True, verbose_name='备注')\n price = models.DecimalField(max_digits=15,\n decimal_places=2,\n verbose_name='合同金额')\n detail = models.TextField(blank=True, null=True, verbose_name='合同详细')\n start_day = models.DateField(blank=True, null=True, verbose_name='开始日期')\n end_day = models.DateField(blank=True, null=True, verbose_name='失效日期')\n license_num = models.IntegerField(blank=True,\n null=True,\n verbose_name='license数量')\n c_day = models.DateField(auto_now_add=True, verbose_name='创建日期')\n m_day = models.DateField(auto_now=True, verbose_name='修改日期')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '合同'\n verbose_name_plural = verbose_name\n\n\nclass Tag(models.Model):\n \"\"\" 资产标签\n 资产标签模型与资产是多对多的关系 \"\"\"\n name = models.CharField(max_length=32, unique=True, verbose_name='标签名')\n c_day = models.DateField(auto_now_add=True, verbose_name='创建日期')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '标签'\n verbose_name_plural = verbose_name\n\n\nclass EventLog(models.Model):\n \"\"\" 日志 \"\"\"\n\n event_type_choice = (\n (0, '其他'),\n (1, '硬件变更'),\n (2, '新增配件'),\n (3, '设备下线'),\n (4, '设备上线'),\n (5, '定期维护'),\n (6, '业务上线/更新/变更'),\n )\n name = models.CharField('事件名称', max_length=128)\n asset = models.ForeignKey('Asset',\n blank=True,\n null=True,\n on_delete=models.SET_NULL) # 当资产审批成功时有这项数据\n event_type = models.SmallIntegerField('时间类型',\n choices=event_type_choice,\n default=4)\n component = models.CharField('事件子项', max_length=256, blank=True, null=True)\n datail = models.TextField('事件详情')\n date = models.DateTimeField('事件时间', auto_now_add=True)\n user = models.ForeignKey(User,\n blank=True,\n null=True,\n verbose_name='事件执行人',\n on_delete=models.SET_NULL)\n memo = models.TextField('备注', blank=True, null=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = '事件记录'\n verbose_name_plural = verbose_name\n\n\nclass NewAssetApprovalZone(models.Model):\n \"\"\" 新资产待审批区 \"\"\"\n 
asset_type_choice = (\n ('server', '服务器'),\n ('networkdevice', '网络设备'),\n ('storagedevice', '存储设备'),\n ('securitydevice', '安全设备'),\n ('software', '软件资产'),\n )\n sn = models.CharField('资产SN号', max_length=128, unique=True)\n asset_type = models.CharField(choices=asset_type_choice,\n default='server',\n max_length=64,\n blank=True,\n verbose_name='资产类型')\n manufacturer = models.CharField(max_length=64,\n blank=True,\n null=True,\n verbose_name='生产厂商')\n model = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='型号')\n ram_size = models.PositiveIntegerField(blank=True,\n null=True,\n verbose_name='内存大小')\n cpu_model = models.CharField(max_length=128,\n blank=True,\n null=True,\n verbose_name='CPU型号')\n cpu_count = models.PositiveSmallIntegerField(blank=True,\n null=True,\n verbose_name='CPU物理数量')\n cpu_core_count = models.PositiveSmallIntegerField(blank=True,\n null=True,\n verbose_name='CPU核心数量')\n os_distribution = models.CharField('发行商',\n max_length=64,\n blank=True,\n null=True)\n os_type = models.CharField('系统类型', max_length=64, blank=True, null=True)\n os_release = models.CharField('操作系统版本号',\n max_length=64,\n blank=True,\n null=True)\n\n data = models.TextField('资产数据') # 此字段必填\n\n c_time = models.DateTimeField(auto_now_add=True, verbose_name='汇报日期')\n m_time = models.DateTimeField(auto_now=True, verbose_name='批准日期')\n approved = models.BooleanField('是否批准', default=False)\n\n def __str__(self):\n return self.sn\n\n class Meta:\n verbose_name = '新上线待审批资产'\n verbose_name_plural = verbose_name\n ordering = ['-c_time']\n" } ]
4
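The models.py captured in this record centres everything on the `Asset` table: whole devices (`Server`, `SecurityDevice`, `StorageDevice`, `NetworkDevice`) hang off it through one-to-one links, while components (`CPU`, `RAM`, `Disk`, `NIC`) use plain foreign keys so one asset can own several of each. Note that several of the captured `__str__` methods reference names the classes never define: five classes call `get_sub_asset_type_dispaly()` (a misspelling of Django's generated `get_sub_asset_type_display()`), `CPU.__str__` reads `self.cpu.model` instead of `self.cpu_model`, `RAM.__str__` reads `self.ram_frequency` instead of `self.frequency`, `NetworkDevice.__str__` reads `self.modelm`, `Software.__str__` calls `self.version()` although `version` is a plain `CharField`, `Disk` and `NIC` pass fewer values than their format strings expect, `EventLog` declares `datail` rather than `detail`, and `BusinessUnit.__str__` returns `self.name` although no `name` field is declared. A minimal corrected sketch of three of those methods, assuming the same module context as the file above (fields elided with `...` are unchanged; this is an excerpt, not a standalone module):

```python
from django.db import models  # same import context as the captured models.py


class CPU(models.Model):
    ...  # fields exactly as in the file above

    def __str__(self):
        # was: self.cpu.model -- the field defined above is cpu_model
        return '%s: %s' % (self.asset.name, self.cpu_model)


class NetworkDevice(models.Model):
    ...

    def __str__(self):
        # was: get_sub_asset_type_dispaly() and self.modelm
        return '%s--%s--%s <sn:%s>' % (self.asset.name,
                                       self.get_sub_asset_type_display(),
                                       self.model, self.asset.sn)


class Software(models.Model):
    ...

    def __str__(self):
        # was: self.version() -- version is a CharField, so it is not callable
        return '%s--%s' % (self.get_sub_asset_type_display(), self.version)
```

With those accessors fixed, printing an instance (for example in the admin list view) no longer raises `AttributeError` or `TypeError`.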
nissmar/SudokuSolver
https://github.com/nissmar/SudokuSolver
cd456f2c9333dbbc7c0ff646511e813dbf8175cc
fdfc2abfce9411bc962ad12576053d393012d1cd
187f96e642c29335708a4089fddae82f63a06ff9
refs/heads/master
2020-11-25T19:59:06.965681
2020-04-17T23:36:29
2020-04-17T23:36:29
228,822,299
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.3030547499656677, "alphanum_fraction": 0.3457001745700836, "avg_line_length": 28.61419677734375, "blob_id": "b059a6ee2a7812267f921463f393683e33be0362", "content_id": "7aae18902a485ca399bbf485099f67c2e20198f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9928, "license_type": "no_license", "max_line_length": 145, "num_lines": 324, "path": "/sudoku.py", "repo_name": "nissmar/SudokuSolver", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom copy import deepcopy\r\nplt.interactive(True)\r\nfig, ax = plt.subplots()\r\n\r\nglobal Dname\r\n\r\nDname={}\r\n\r\ndef inter(a,b):\r\n c=[]\r\n for elem in b:\r\n if elem in a:\r\n c.append(elem)\r\n return c\r\n \r\n \r\n\r\nclass Sud(): #tableau, pave, case\r\n def __init__(self):\r\n self.tab=np.array([[np.zeros((3,3)) for i in range(3)] for i in range(3)])\r\n self.possibles={}\r\n self.text=np.array([[[[ax.text(3*j+l+0.5, (3-i)*3-k-0.5, '') for l in range(3)] for k in range(3)] for j in range(3)] for i in range(3)])\r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n self.possibles[(i,j,k,l)]=[]\r\n \r\n def getpave(self,i,j): #i,j sont les coordonnées du pavé\r\n L=[i for i in range(1,10)]\r\n for k in range(3):\r\n for l in range(3):\r\n x=self.tab[i,j,k,l]\r\n if x!=0:\r\n if x in L:\r\n L.remove(x)\r\n else: \r\n return []\r\n return L\r\n def getligne(self,i,j,k): #k numéro de la colonnz du pavé\r\n L=[i for i in range(1,10)]\r\n for l0 in range(9):\r\n x=self.tab[i,l0//3,k,l0%3]\r\n if x!=0:\r\n if x in L:\r\n L.remove(x)\r\n else: \r\n return []\r\n return L\r\n def getcolonne(self,i,j,l): #l numéro de la ligne du pavé\r\n L=[i for i in range(1,10)]\r\n for k0 in range(9):\r\n x=self.tab[k0//3,j,k0%3,l]\r\n if x!=0:\r\n if x in L:\r\n L.remove(x)\r\n else: \r\n return []\r\n return L\r\n \r\n def valpossible(self,i,j,k,l): #retourne la liste des valeurs VIDE si il y a erreur\r\n return inter(self.getpave(i,j),inter(self.getligne(i,j,k),self.getcolonne(i,j,l)))\r\n \r\n def updatedic(self):\r\n c=False\r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n if self.tab[i,j,k,l]==0:\r\n L=self.valpossible(i,j,k,l)\r\n if len(L)==1:\r\n self.tab[i,j,k,l]=L[0]\r\n self.text[i,j,k,l].set_text(str(int(self.tab[i,j,k,l])))\r\n self.text[i,j,k,l].set_color('b')\r\n # print(i,j,k,l)\r\n c=True\r\n else:\r\n self.possibles[(i,j,k,l)]=L\r\n return c\r\n \r\n def remplissage(self): #RESOLUTION\r\n min_val, max_val = 0, 9\r\n ind_array = np.arange(min_val + 0.5, max_val + 0.5, 1.0)\r\n x, y = np.meshgrid(ind_array, ind_array)\r\n \r\n self.affichefirst()\r\n \r\n ax.set_xlim(min_val, max_val)\r\n ax.set_ylim(min_val, max_val)\r\n ax.set_xticks(np.arange(max_val))\r\n ax.set_yticks(np.arange(max_val))\r\n ax.grid()\r\n \r\n c=True\r\n while c==True:\r\n c=self.updatedic()\r\n plt.show()\r\n L=self.zero()\r\n if L!=[]:\r\n self.assume(L,1)\r\n # plt.pause(0.01)\r\n \r\n \r\n \r\n def affichefirst(self):\r\n \r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n x=self.tab[i,j,k,l]\r\n if x!=0:\r\n self.text[i,j,k,l].set_text(str(int(x)))\r\n # self.text[i,j,k,l].set_color('k')\r\n def delete(self):\r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n self.text[i,j,k,l].set_text(\"\")\r\n # self.text[i,j,k,l].set_color('k')\r\n \r\n \r\n \r\n 
def zero(self): #retourne la liste des cases égales à zéro\r\n L=[]\r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n x=self.tab[i,j,k,l]\r\n if x==0:\r\n L.append([i,j,k,l])\r\n return L\r\n \r\n def updatedicassume(self): #0:pas de modif 1: modif 2:erreur \r\n c=0\r\n for i in range(3):\r\n for j in range(3):\r\n for k in range(3):\r\n for l in range(3):\r\n if self.tab[i,j,k,l]==0:\r\n L=self.valpossible(i,j,k,l)\r\n if len(L)==1:\r\n self.tab[i,j,k,l]=L[0]\r\n self.text[i,j,k,l].set_text(str(int(self.tab[i,j,k,l])))\r\n self.text[i,j,k,l].set_color('b')\r\n # plt.pause(0.01)\r\n c=1\r\n elif len(L)==0:\r\n return 2\r\n else:\r\n self.possibles[(i,j,k,l)]=L\r\n return c\r\n \r\n def assume(self,lzero,NUM): #essaie la case ou self possible est minimal\r\n # print('ok')\r\n\r\n k00=2\r\n c=True\r\n while c:\r\n for elem in lzero:\r\n if c:\r\n i,j,k,l=elem\r\n length=len(self.possibles[(i,j,k,l)])\r\n\r\n if length==k00:\r\n # print('ok')\r\n return self.tryval(i,j,k,l,NUM) \r\n c=False\r\n print(k)\r\n k00+=1 \r\n \r\n def tryval(self,i,j,k,l,NUM):\r\n global Dname\r\n k00=0\r\n Dname[NUM]=Sud()\r\n lpossible=self.possibles[(i,j,k,l)]\r\n \r\n c=1\r\n for elem in lpossible:\r\n \r\n k00+=1\r\n Dname[NUM].tab=deepcopy(self.tab)\r\n Dname[NUM].possibles=dict.copy(self.possibles)\r\n Dname[NUM].tab[i,j,k,l]=elem\r\n Dname[NUM].possibles[(i,j,k,l)]=[elem]\r\n \r\n self.text[i,j,k,l].set_text(str(elem))\r\n self.text[i,j,k,l].set_color('r')\r\n c=1\r\n print(NUM,lpossible,Dname[NUM].possibles[(i,j,k,l)], i,j,k,l)\r\n\r\n while c==1:\r\n c=Dname[NUM].updatedicassume()\r\n plt.pause(0.1)\r\n \r\n if c==0:\r\n lzero=Dname[NUM].zero()\r\n if lzero==[]:\r\n return True\r\n else:\r\n if Dname[NUM].assume(lzero,(10+NUM+k00)):\r\n return True\r\n else:\r\n Dname[NUM].delete()\r\n c=2\r\n # return False\r\n \r\n \r\n \r\n\r\n\r\n Dname[NUM].delete()\r\n\r\n return False\r\n \r\n \r\n \r\n \r\n \r\n\r\n# a.tab[0,0]=np.array([[ 0, 0, 1],\r\n# [ 6, 0, 0],\r\n# [ 4, 0, 2]])\r\n# a.tab[0,1]=np.array([[ 0, 0, 0.],\r\n# [ 1, 7, 0],\r\n# [ 0, 0, 0]])\r\n# a.tab[0,2]=np.array([[ 0, 0, 6],\r\n# [ 8, 0, 0],\r\n# [ 5, 7, 1]])\r\n# a.tab[1,0]=np.array([[ 0, 0, 8],\r\n# [ 0, 0., 0],\r\n# [ 0, 4, 0]])\r\n# a.tab[1,1]=np.array([[ 4, 0, 3],\r\n# [ 0, 0, 0],\r\n# [ 5, 0, 9]])\r\n# a.tab[1,2]=np.array([[ 0, 5, 0],\r\n# [ 0, 0., 0],\r\n# [ 6, 0, 0]])\r\n# a.tab[2,0]=np.array([[ 1, 2, 6],\r\n# [ 0, 0, 5],\r\n# [ 8, 0, 0]])\r\n# a.tab[2,1]=np.array([[ 0, 0, 0],\r\n# [ 0, 9, 4],\r\n# [ 0., 0, 0]])\r\n# a.tab[2,2]=np.array([[ 9, 0, 5],\r\n# [ 0, 0, 2],\r\n# [ 7, 0, 0]])\r\n# \r\n\r\n# \r\n# a.tab[0,0]=np.array([[ 0, 0, 0],\r\n# [ 0, 0, 0],\r\n# [ 0, 0, 0]])\r\n# a.tab[0,1]=np.array([[ 0, 0, 0.],\r\n# [ 7, 8, 0],\r\n# [ 0, 0, 0]])\r\n# a.tab[0,2]=np.array([[ 0, 0, 0],\r\n# [ 0, 0, 3],\r\n# [ 9, 0, 7]])\r\n# a.tab[1,0]=np.array([[ 0, 0, 3],\r\n# [ 0, 0., 0],\r\n# [ 0, 9, 0]])\r\n# a.tab[1,1]=np.array([[ 0, 5, 7],\r\n# [ 0, 2, 0],\r\n# [ 0, 0, 0]])\r\n# a.tab[1,2]=np.array([[ 1, 0, 8],\r\n# [ 0, 7., 0],\r\n# [ 0, 0, 5]])\r\n# a.tab[2,0]=np.array([[ 0, 1, 0],\r\n# [ 0, 2, 0],\r\n# [ 0, 0, 8]])\r\n# a.tab[2,1]=np.array([[ 0, 0, 2],\r\n# [ 0, 7, 6],\r\n# [ 0., 3, 5]])\r\n# a.tab[2,2]=np.array([[ 0, 0, 6],\r\n# [ 5, 8, 0],\r\n# [ 7, 0, 9]])\r\n\r\n# \r\na=Sud() \r\na.tab[0,0]=np.array([[ 0, 0, 6],\r\n [ 0, 9, 0],\r\n [ 3, 0, 0]])\r\na.tab[0,1]=np.array([[ 0, 0, 1],\r\n [ 0, 3, 0],\r\n [ 6, 0, 0]])\r\na.tab[0,2]=np.array([[ 0, 0, 5],\r\n [ 0, 2, 0],\r\n [ 8, 0, 
0]])\r\na.tab[1,0]=np.array([[ 6, 0, 0],\r\n [ 0, 8, 0],\r\n [ 0, 0, 7]])\r\na.tab[1,1]=np.array([[ 4, 0, 0],\r\n [ 0, 6, 0],\r\n [ 0, 0, 3]])\r\na.tab[1,2]=np.array([[ 2, 0, 0],\r\n [ 0, 5, 0],\r\n [ 0, 0, 9]])\r\na.tab[2,0]=np.array([[ 0, 0, 3],\r\n [ 0, 2, 0],\r\n [ 4, 0, 0]])\r\na.tab[2,1]=np.array([[ 0, 0, 8],\r\n [ 0, 9, 0],\r\n [ 3, 0, 0]])\r\na.tab[2,2]=np.array([[ 0, 0, 6],\r\n [ 0, 4, 0],\r\n [ 5, 0, 0]])\r\n \r\na.remplissage()\r\n\r\ndef sudoku(): #REMPLIR CASE PAR CASE\r\n a=Sud()\r\n for i in range(3):\r\n for j in range(3):\r\n l = list(map(int, input().split()))\r\n # print(l)\r\n a.tab[i,j]=np.array([l[:3],l[3:6],l[6:]])\r\n return a\r\n" }, { "alpha_fraction": 0.8059701323509216, "alphanum_fraction": 0.8059701323509216, "avg_line_length": 32.5, "blob_id": "42895093174c7ddeea7c2227f8e9fbc1742d0876", "content_id": "931a356aa15065da53b578dfddd8a8443a69e5ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 67, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "nissmar/SudokuSolver", "src_encoding": "UTF-8", "text": "# SudokuSolver\nThis is a handmade Sudoku Solver written in Python.\n" } ]
2
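sudoku.py above combines candidate elimination (`valpossible`/`updatedic` fill any cell that has exactly one legal value) with guessing on the cell that has the fewest candidates (`assume`/`tryval`), all interleaved with matplotlib drawing; it also imports numpy twice at the top. A stand-alone sketch of the same search idea without the plotting, using a plain 9x9 list of lists with 0 for empty cells (this is an illustration of the algorithm, not the repository's own API):

```python
def candidates(grid, r, c):
    """Values 1-9 not already used in row r, column c, or the 3x3 box of (r, c)."""
    used = set(grid[r]) | {grid[i][c] for i in range(9)}
    br, bc = 3 * (r // 3), 3 * (c // 3)
    used |= {grid[br + i][bc + j] for i in range(3) for j in range(3)}
    return [v for v in range(1, 10) if v not in used]


def solve(grid):
    """Backtracking solver; fills grid in place and returns True if solved."""
    best = None
    for r in range(9):
        for c in range(9):
            if grid[r][c] == 0:
                opts = candidates(grid, r, c)
                if not opts:
                    return False            # contradiction: backtrack
                if best is None or len(opts) < len(best[2]):
                    best = (r, c, opts)     # fewest-candidates cell, as in Sud.assume
    if best is None:
        return True                         # no empty cell left: solved
    r, c, opts = best
    for v in opts:
        grid[r][c] = v
        if solve(grid):
            return True
    grid[r][c] = 0
    return False
```

When every empty cell still has a single candidate this degenerates to pure constraint propagation, which is exactly the cheap path `updatedic` exploits before any guessing starts.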
archbars/pathsandfolders
https://github.com/archbars/pathsandfolders
c488ee9c7488583f9a26f2f34386271c0c5b0f44
0a59abb60870bf2316dff9cc42705e7a66e854a0
ad8bb89df5fadc4aade7f275c4d1e9e8e6463b30
refs/heads/master
2020-03-19T13:24:42.728962
2018-09-19T13:05:34
2018-09-19T13:05:34
136,578,202
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6521374583244324, "alphanum_fraction": 0.6663872599601746, "avg_line_length": 29.58974266052246, "blob_id": "9f1268a4715faeeda03cafce5532a3c0c1bc9bef", "content_id": "bb1c41b604a56510dbc5af46a432420fa2e1d07e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3146, "license_type": "no_license", "max_line_length": 102, "num_lines": 78, "path": "/find_procedure.py", "repo_name": "archbars/pathsandfolders", "src_encoding": "UTF-8", "text": "# Задание\n# мне нужно отыскать файл среди десятков других\n# я знаю некоторые части этого файла (на память или из другого источника)\n# я ищу только среди .sql файлов\n# 1. программа ожидает строку, которую будет искать (input())\n# после того, как строка введена, программа ищет её во всех файлах\n# выводит список найденных файлов построчно\n# выводит количество найденных файлов\n# 2. снова ожидает ввод\n# поиск происходит только среди найденных на этапе 1\n# 3. снова ожидает ввод\n# ...\n# Выход из программы программировать не нужно.\n# Достаточно принудительно остановить, для этого можете нажать Ctrl + C\n\n# Пример на настоящих данных\n\n# python3 find_procedure.py\n# Введите строку: INSERT\n# ... большой список файлов ...\n# Всего: 301\n# Введите строку: APPLICATION_SETUP\n# ... большой список файлов ...\n# Всего: 26\n# Введите строку: A400M\n# ... большой список файлов ...\n# Всего: 17\n# Введите строку: 0.0\n# Migrations/000_PSE_Application_setup.sql\n# Migrations/100_1-32_PSE_Application_setup.sql\n# Всего: 2\n# Введите строку: 2.0\n# Migrations/000_PSE_Application_setup.sql\n# Всего: 1\n\n# не забываем организовывать собственный код в функции\n\nimport os\n\nmigrations = 'Migrations'\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef open_file_and_find_keyword(file, input_string, local_list):\n with open(file, 'r') as current_file:\n for line in current_file:\n if input_string in line:\n print(file)\n local_list.append(file)\n break\n return local_list\n\n\ndef find_sql(list_of_files, input_string):\n local_list = []\n if len(list_of_files) == 0:\n for d, dirs, files in os.walk(os.path.join(current_dir, migrations)): # Просматриваем каталог\n for file in files: # идем по каждому файлу\n path = os.path.join(d, file) # формирование полного пути файла\n if path.endswith('.sql'):\n local_list = open_file_and_find_keyword(str(path), input_string, local_list)\n else:\n for file in list_of_files:\n local_list = open_file_and_find_keyword(file, input_string, local_list)\n\n print(\"Всего: \", len(local_list))\n return local_list\n\n\ndef start_func():\n local_list = []\n while True:\n key = input(\"Введите строку для поиска: \")\n local_list = find_sql(local_list, key)\n\n\nif __name__ == '__main__':\n start_func()\n" } ]
1
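The long comment block at the top of find_procedure.py is in Russian; in English it says roughly: the program repeatedly asks for a search string, looks for it only in `.sql` files (first in all of them, then only in the files matched by the previous query), prints each matching file and the total count, and is simply stopped with Ctrl+C rather than having an exit command. A small stand-alone sketch of that successive-narrowing idea without the interactive loop (the `Migrations` directory name and the three example terms are taken from the file's own sample session):

```python
import os


def narrow(paths, needle):
    """Keep only the files whose text contains `needle`."""
    kept = []
    for path in paths:
        with open(path, 'r', errors='ignore') as fh:
            if needle in fh.read():
                kept.append(path)
    return kept


# Start from every .sql file under Migrations/, then shrink the candidate set
# with each successive search term, mirroring find_sql()'s behaviour.
sql_files = [os.path.join(d, name)
             for d, _dirs, files in os.walk('Migrations')
             for name in files if name.endswith('.sql')]
for term in ('INSERT', 'APPLICATION_SETUP', 'A400M'):
    sql_files = narrow(sql_files, term)
    print(term, '->', len(sql_files), 'file(s)')
```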
GyeoreLee/naver_geocode_translator
https://github.com/GyeoreLee/naver_geocode_translator
6e61e0b82f5b0f415960bc6cbec290b599326612
4d7cadebfa81b1e41da4679cd7fa14162a2fb625
f2bcc1fe6158787d2f3ac1abb9fce3c0730ff780
refs/heads/master
2020-05-15T11:13:04.602135
2019-06-19T05:42:35
2019-06-19T05:42:35
182,216,169
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5330296158790588, "alphanum_fraction": 0.553530752658844, "avg_line_length": 17.913043975830078, "blob_id": "df48d8724c91591a0c82e3d1677a58d0d522850a", "content_id": "ae4c44af165db38ab38ca18d8adfe5a629d25108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 603, "license_type": "no_license", "max_line_length": 113, "num_lines": 23, "path": "/README.MD", "repo_name": "GyeoreLee/naver_geocode_translator", "src_encoding": "UTF-8", "text": "Naver_geocode_translator \n===========================================================\n\n## - 설명\n\n네이버 지도 API를 사용해서 csv파일에 기록된 주소(지번, 도로명)를 위도, 경도로 변환하여 기록\n\n\n\n## - 사용 방법\n\n### 1. 설치\n\n- 파이썬 관련 패키지 설치\n ```\n sudo pip install -r requirements.txt\n ```\n### 2. 입력 파일 구조\n- 링크의 파일과 같이 구성(csv파일로 저장)\n- 파일명 : 주소_데이터.csv https://docs.google.com/spreadsheets/d/1ylRIdCFnnP91ARMJZccV0YmRAQVsgw1nrB8CZvgfdvE/edit#gid=0\n\t```\n\tpython main.py\n\t```\n\n\n\n\n" }, { "alpha_fraction": 0.5095195174217224, "alphanum_fraction": 0.5154125094413757, "avg_line_length": 28.426666259765625, "blob_id": "becd0053fd0b827008e9a68f56c2990918bf7836", "content_id": "2b862c3dd75179415e6a4513e426e976456db40e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2310, "license_type": "no_license", "max_line_length": 99, "num_lines": 75, "path": "/src/main.py", "repo_name": "GyeoreLee/naver_geocode_translator", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport urllib.request\nimport datetime\nimport time\nimport json\nimport pandas as pd\n\n\ndef get_request_url(url,client_id='vabkyzx1im', client_secret='NZttREt7fWJ4jat98knMXWcYjGSnNiy468I7rh7x'):\n req = urllib.request.Request(url)\n req.add_header(\"X-NCP-APIGW-API-KEY-ID\", client_id)\n req.add_header(\"X-NCP-APIGW-API-KEY\", client_secret)\n try:\n response = urllib.request.urlopen(req)\n if response.getcode() == 200:\n print(\"[%s] Url Request Success\" % datetime.datetime.now())\n return response.read().decode('utf-8')\n except Exception as e:\n print(e)\n print(\"[%s] Error for URL : %s\" % (datetime.datetime.now(), url))\n return None\n\n\ndef getGeoData(address):\n base = \"https://naveropenapi.apigw.ntruss.com/map-geocode/v2/geocode\"\n node = \"\"\n parameters = \"?query=%s\" % urllib.parse.quote(address)\n url = base + node + parameters\n\n retData = get_request_url(url)\n\n if (retData == None):\n return None\n else:\n return json.loads(retData)\n\n\ndef main():\n # 엑셀 읽기\n df = pd.read_csv('주소_데이터.csv')\n df['위도'] = 0\n df['경도'] = 0\n i = 0\n for id, address,y,x in df.values:\n #API 사용\n try:\n jsonResult = getGeoData(address)\n except:\n i = i +1\n continueㄴ\n print('index, %d ,검색 주소 : %s'%(i,address))\n\n # 결과 parsing\n if 'addresses' in jsonResult.keys():\n print('총 검색 결과: ', jsonResult['addresses'].__len__())\n if jsonResult['addresses'].__len__() >=1:\n item = jsonResult['addresses'][0]\n print('=======================')\n print('위도: ', str(item['y']))\n print('경도: ', str(item['x']))\n print('=======================')\n #df.iloc[i] = {'ID':id,'주소':address,'위도': float(item['y']), '경도': float(item['x'])}\n y_update = pd.Series([float(item['y'])],name='위도', index=[i])\n x_update = pd.Series([float(item['x'])], name='경도', index=[i])\n df.update(y_update)\n df.update(x_update)\n\n i = i +1\n\n\n #엑셀 쓰기\n df.to_csv('주소_API_결과_데이터.csv')\nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.4375, "alphanum_fraction": 0.671875, "avg_line_length": 15, "blob_id": 
"fd2a3d3e82e5200745720a085474dfcddca4e20c", "content_id": "adad2a9e78f0042fc030b5adec3ebb7a1393df11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 64, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/requirements.txt", "repo_name": "GyeoreLee/naver_geocode_translator", "src_encoding": "UTF-8", "text": "numpy==1.16.4\npandas==0.24.2\npython-dateutil==2.8.0\nsix==1.12.0\n" } ]
3
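The README in this record is Korean; in English it says the tool uses the Naver Maps geocoding API to convert the addresses (lot-number or road-name) listed in a CSV file into latitude/longitude, that dependencies are installed with `pip install -r requirements.txt`, and that the input file is `주소_데이터.csv` laid out like the linked spreadsheet. In main.py the `except` branch contains `continueㄴ` — a stray Hangul jamo after `continue` that makes the function a syntax error. A corrected sketch of that loop, keeping the repo's own file and column names (`getGeoData` is the helper defined in main.py above; everything else is illustrative):

```python
import pandas as pd


def main():
    df = pd.read_csv('주소_데이터.csv')          # input file name used by the repo
    df['위도'] = 0.0                             # latitude column
    df['경도'] = 0.0                             # longitude column
    for i, (_id, address, _y, _x) in enumerate(df.values):
        try:
            result = getGeoData(address)         # helper defined in main.py above
        except Exception:
            continue                             # was: `continueㄴ` (syntax error)
        if not result or not result.get('addresses'):
            continue
        item = result['addresses'][0]
        df.update(pd.Series([float(item['y'])], name='위도', index=[i]))
        df.update(pd.Series([float(item['x'])], name='경도', index=[i]))
    df.to_csv('주소_API_결과_데이터.csv')


if __name__ == '__main__':
    main()
```

Initialising the new columns with `0.0` instead of `0` also keeps them float-typed from the start, so the later `df.update(...)` calls write into a column of the right dtype.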
JohnBogdan1/Kaggle
https://github.com/JohnBogdan1/Kaggle
bfdc9e95e2a62345a9e63c1f78837882b94b3ce7
928425367050e08b72dbe6a55ebb78b337cadbfc
2078f8490fe6a1dbe7b3fecba256a9077f2775c3
refs/heads/master
2020-05-22T00:45:13.772440
2019-05-11T20:46:26
2019-05-11T20:46:26
186,179,447
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5854175686836243, "alphanum_fraction": 0.6023738980293274, "avg_line_length": 34.208953857421875, "blob_id": "01064ba82caba979907f0f71b9bc543c04b505e8", "content_id": "9e3fd6ff031ed6479cac7d37df7bc26722cc3c4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9436, "license_type": "no_license", "max_line_length": 155, "num_lines": 268, "path": "/Santander Customer Transaction Prediction/sctp.py", "repo_name": "JohnBogdan1/Kaggle", "src_encoding": "UTF-8", "text": "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\nfrom sklearn import svm, tree, metrics\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, BaggingClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC\nfrom sklearn.naive_bayes import GaussianNB\nimport lightgbm as lgb\nfrom sklearn.preprocessing import StandardScaler\n\nfrom copy import deepcopy\n\n\ndef clf_training_method(clf, features_train, features_test, labels_train, labels_test):\n print(\"####### TRAIN & PREDICT #######\")\n clf.fit(features_train, labels_train)\n pred = clf.predict(features_test)\n acc = metrics.accuracy_score(labels_test, pred)\n print(\"The prediction accuracy of classification is [%f].\" % acc)\n\n print(\"-> DONE [TRAIN & PREDICT].\\n\")\n\n return acc\n\n\ndef clf_cross_validation(clf, features, labels):\n print(\"####### CROSS VALIDATION #######\")\n\n auc_arr_scores = cross_val_score(clf, features, labels, scoring='roc_auc', cv=5)\n auc_acc = auc_arr_scores.mean()\n\n print(\"The prediction accuracy of classification is [%f].\" % auc_acc)\n\n print(\"-> DONE [CROSS VALIDATION].\\n\")\n \n return auc_acc\n\n\ndef clf_train_submit(clf, features, labels, tests):\n print(\"####### TRAIN & PREDICT #######\")\n\n clf.fit(features, labels)\n pred = clf.predict_proba(tests)\n\n print(\"-> DONE [TRAIN & PREDICT].\\n\")\n\n return pred\n\n\ndef check_values(data):\n columns = [\"var_\" + str(i) for i in range(200)]\n\n big_values = []\n small_values = []\n for col in columns:\n big_values.append(data.loc[data[col].idxmax()][col])\n small_values.append(data.loc[data[col].idxmin()][col])\n\n print(\"MAX VALUE:\", max(big_values))\n print(\"MIN VALUE:\", min(small_values))\n\ndef get_columns(data):\n col_list = list(data)\n if 'ID_code' in col_list:\n col_list.remove('ID_code')\n \n if 'target' in col_list:\n col_list.remove('target')\n \n return col_list\n\ndef find(arr):\n v = [x for x in arr if x < 0]\n \n return len(v)\n\ndef adjust_features(data):\n col_list = get_columns(data)\n for col in col_list:\n data[col] *= -1\n #data[\"sum_up\"] = data[col_list].sum(axis=1)\n #data[\"average\"] = data[col_list].sum(axis=1) / len(col_list)\n # print(data.head())\n\n \"\"\"for col in col_list:\n new_col = col + \"_squared\"\n data[new_col] = 
data[col] ** 2\n\n data = data.drop(col_list, axis=1)\n \n col_list = get_columns(data)\n\n #data[\"sum_up_squared\"] = data[col_list].sum(axis=1)\n print(data.head())\"\"\"\n #print(list(data))\n\n\ndef load_dataset(csv_file, submit=False):\n print(\"####### DATA PROCESSING #######\")\n data = pd.read_csv(csv_file)\n\n if not submit:\n data = data.sample(frac=0.1)\n\n print(\"There are %d rows to process in dataset.\" % data.shape[0])\n\n # print(data.head())\n\n missing_values = data.isnull().sum()\n # print(missing_values)\n missing_values = missing_values[missing_values > 0] / data.shape[0]\n print(\"Percent of missing values\\n%s\\n\" % (missing_values * 100 if len(missing_values) > 0 else 0))\n\n if len(missing_values) > 0:\n # replace NaN values with 0\n data = data.fillna(0)\n\n # print(data.dtypes)\n\n check_values(data)\n\n if not submit:\n if \"target\" in data:\n print(\"# of 0 in target: %d\" % list(data[\"target\"]).count(0))\n print(\"# of 1 in target: %d\" % list(data[\"target\"]).count(1))\n \n adjust_features(data)\n\n print(\"-> DONE [DATA PROCESSING].\\n\")\n\n return data\n\n\ndef train_classifier(train_data, test_data, submit=False, use_lgbm=False):\n print(\"####### PREPARE TRAIN / VALIDATION DATA #######\")\n\n # adjust the classifier using train.csv\n # set initial features and labels\n features = train_data.drop(['ID_code', 'target'], axis=1)\n labels = train_data['target']\n \n random_state = 42\n np.random.seed(random_state)\n\n if not submit:\n # train / test split\n features_train, features_test, labels_train, labels_test = train_test_split(features, labels,\n test_size=0.25, random_state=42)\n\n # standardization of training data\n scaler = StandardScaler().fit(features_train)\n features_train = scaler.transform(features_train)\n features_test = scaler.transform(features_test)\n \n if use_lgbm:\n scaler = StandardScaler().fit(features)\n features = scaler.transform(features)\n\n # print(features_train)\n # print(features_test)\n else:\n orig_test_data = deepcopy(test_data)\n test_data = test_data.drop(['ID_code'], axis=1)\n scaler = StandardScaler().fit(features)\n features = scaler.transform(features)\n tests = scaler.transform(test_data)\n\n print(\"-> DONE [PREPARE TRAIN / VALIDATION DATA].\\n\")\n\n print(\"####### PREPARE CLASSIFIER #######\")\n \n if not use_lgbm:\n\n param_grid = {\"base_estimator__criterion\": [\"gini\", \"entropy\"],\n \"base_estimator__splitter\": [\"best\", \"random\"],\n \"n_estimators\": [i * 10 for i in [1, 10, 100, 1000]],\n \"base_estimator__min_samples_split\": [i * 10 for i in [1, 5, 8, 10]],\n \"base_estimator__min_samples_leaf\": [i * 10 for i in [1, 5, 8, 10]],\n \"base_estimator__max_depth\": [None, 1, 2, 3, 4, 5]\n }\n \n \"\"\"clf = AdaBoostClassifier(\n base_estimator=tree.DecisionTreeClassifier(criterion=\"entropy\", splitter=\"random\",\n min_samples_split=80, min_samples_leaf=80, max_depth=1,\n ), n_estimators=1000, learning_rate=1)\n \n clf = AdaBoostClassifier(\n base_estimator=LinearSVC(max_iter = 10000), algorithm='SAMME', n_estimators=100, learning_rate=1)\"\"\"\n \n \"\"\"clf = AdaBoostClassifier(\n base_estimator=GaussianNB(var_smoothing=1e-15), algorithm='SAMME', n_estimators=1000, learning_rate=1, random_state=99999)\"\"\"\n \n clf = BaggingClassifier(\n base_estimator=GaussianNB(var_smoothing=1e-15), n_estimators=100, random_state=99999, n_jobs = -1, bootstrap_features = True, oob_score = True)\n\n \"\"\"clf = AdaBoostClassifier(\n base_estimator=LogisticRegression(solver='newton-cg', max_iter=100, 
n_jobs=-1, tol=1e-4, C=1), \n algorithm='SAMME.R', n_estimators=100, learning_rate=1)\"\"\"\n \n # clf = GradientBoostingClassifier(min_samples_split=80, min_samples_leaf=80, n_estimators=100, learning_rate=1)\n \n # clf = tree.DecisionTreeClassifier()\n \n \"\"\"clf = AdaBoostClassifier(base_estimator=tree.DecisionTreeClassifier(), n_estimators=100, learning_rate=1)\n grid_search_ABC = GridSearchCV(clf, param_grid=param_grid, scoring='roc_auc', cv=5)\n grid_search_ABC.fit(features, labels)\"\"\"\n \n else:\n params = {\n \"objective\" : \"binary\", \"metric\" : \"auc\", \"boosting\": 'gbdt', \"max_depth\" : -1, \"num_leaves\" : 13,\n \"learning_rate\" : 0.01, \"bagging_freq\": 5, \"bagging_fraction\" : 0.4, \"feature_fraction\" : 0.05,\n \"min_data_in_leaf\": 80, \"tree_learner\": \"serial\", \"boost_from_average\": \"true\",\n \"bagging_seed\" : random_state, \"verbosity\" : 1, \"seed\": random_state\n }\n train_data = lgb.Dataset(features, label=labels)\n num_round = 10000\n\n if submit:\n if not use_lgbm:\n pred = clf_train_submit(clf, features, labels, tests)\n else:\n bst = lgb.train(params, train_data, num_round)\n pred = bst.predict(tests)\n submission = pd.DataFrame({\"ID_code\": orig_test_data.ID_code.values})\n if not use_lgbm:\n submission[\"target\"] = pred[:, 1]\n else:\n submission[\"target\"] = pred[:]\n submission.to_csv(\"submission.csv\", index=False)\n else:\n \n if use_lgbm:\n print(lgb.cv(params, train_data, num_round, nfold=5)['auc-mean'][-1])\n else:\n # used for tweaking\n clf_training_method(clf, features_train, features_test, labels_train, labels_test)\n # clf_cross_validation(clf, features, labels)\n\n # print(\"###BEST###\")\n # print(grid_search_ABC.best_params_)\n\n print(\"-> DONE [PREPARE CLASSIFIER].\\n\")\n\n\ndef main():\n submit = True\n use_lgbm = True\n train_data = load_dataset(\"../input/train.csv\", submit=submit)\n test_data = None\n test_data = load_dataset(\"../input/test.csv\", submit=submit)\n train_classifier(train_data, test_data=test_data, submit=submit, use_lgbm=use_lgbm)\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
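sctp.py above either fits scikit-learn ensembles or, when `use_lgbm` is set, evaluates a LightGBM model with `lgb.cv` on the Santander data and reads the mean AUC of the last boosting round. A self-contained illustration of that `lgb.cv` pattern on a tiny synthetic binary problem (parameter values are a trimmed, untuned subset of the script's; the result-dict key is looked up by suffix because older LightGBM returns `'auc-mean'`, as the script assumes, while LightGBM ≥ 4 prefixes it with `'valid '`):

```python
import numpy as np
import lightgbm as lgb

# Small synthetic binary-classification problem standing in for the 200-column data.
rng = np.random.default_rng(42)
X = rng.normal(size=(500, 10))
y = (X[:, 0] + 0.5 * X[:, 1] + rng.normal(scale=0.5, size=500) > 0).astype(int)

params = {"objective": "binary", "metric": "auc", "learning_rate": 0.05,
          "num_leaves": 13, "verbosity": -1, "seed": 42}
train_set = lgb.Dataset(X, label=y)
cv_results = lgb.cv(params, train_set, num_boost_round=200, nfold=5)

# Key name differs across LightGBM versions, so match on the suffix.
auc_key = [k for k in cv_results if k.endswith("auc-mean")][0]
print("mean CV AUC at the last round:", cv_results[auc_key][-1])
```

The script's cross-validation branch does the same thing, just with the full Santander feature matrix and its tuned `params` dictionary.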
ygorg/deflog
https://github.com/ygorg/deflog
559c71b5873b834f888ce215d923a75561a6a311
3c8667befac102a2f5a3c9f4a14d8d7926237085
f38c3f6ba18015bd7edc89674d7f220f3acd3c25
refs/heads/master
2021-06-19T16:18:56.949588
2017-07-19T19:10:25
2017-07-19T19:10:25
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7085837125778198, "alphanum_fraction": 0.7163090109825134, "avg_line_length": 30.917808532714844, "blob_id": "11d6e494c4af9abbba7b69c009a71742a976cd0c", "content_id": "9688ceb349150f2bd82bb1d7157b4b70e32dba81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2336, "license_type": "no_license", "max_line_length": 229, "num_lines": 73, "path": "/README.md", "repo_name": "ygorg/deflog", "src_encoding": "UTF-8", "text": "# [Deflog](https://github.com/sbruno/deflog)\n\n\n## Description\n\nThis program tries to eliminate several characteristics of the language flogger, like repetition of letters, alternation of uppercase and lowercase, abbreviations of sms, etc. for spanish. It is presented in a number of versions:\n\n\n### Web Versions\n\n* [PHP](./php/)\n* [Javascript](./javascript/)\n* [Python w/ cherrypy](./python-cherrypy/)\n\n### Desktop Versions\n\n* [PyQT4](./pyqt/)\n* [CLI](./deflog.py)\n* Others\n\t- [Plugin for Messenger Plus! Live](./msnlive_plugin/)\n\t- [Python module](./pylibdeflog/)\n\n\n## Screenshots\n\n### Online version:\n\n![Online version](http://bananabruno.googlepages.com/deflog-javascript-screenshot-small.jpg)\n\n### Messenger Plus! Live Plugin:\n\n![Plugin for Messenger PLus! Live](http://bananabruno.googlepages.com/deflog-msnlive-screenshot-small.jpg)\n\n\n## Download\n\nThe latest stable versions can be downloaded from http://code.google.com/p/deflog/downloads/list\n\nThe plugin for Messenger Plus! Live was available through their website : http://www.msgpluslive.net/scripts/view/404-DeFlog/\n\n\n## Try it !\n\n[PHP Version](http://www.santiagobruno.com.ar/php/desfotologuear.php)\n\n[JS Version](http://www.santiagobruno.com.ar/javascript/desfotologuear.html)\n\n\n## Description of the methods\n\nMost of the method are made for spanish.\n\n§ = The method is language independant.\n\nAll methods can be applied selectively\n\n* **Desmultiplicar §**: Removes letters repetition (holaaaaa -> hola)\n\n* **Deszezear**: Transforms \\'z\\' in \\'s\\' (Deactivated by default because it creates more harm than good)\n\n* **Des-k-ar**: Transforms \\'k\\' in \\'q\\' (ki -> qui)\n\n* **DesSMSar**: Replace SMS abbreviations (xq -> por que, dsp -> después)\n\n* **Desestupidizar**: (toi -> estoy, i -> y, lemdo -> lindo)\n\n* **Desalternar §**: Convert mixed lowercase uppercase words to lowercase and keeps uppercased word (Letra DE UnA CaNcIoN -> letra DE una cancion)\n\n* **Desporteñar**: Removes finals \\'s\\'s in words ending in \\'istes\\' (lo vistes y me dijistes -> lo viste y me dijiste)\n\n* **Deleet §**: Convert l33t 5p34k to standard speak (3s7o e5 un 73x70 f30 -> esto es un texto feo)\n\n* **Fix missing vowels**: Add missing vowels (it doesn\\'t work english words) (stamos -> estamos, spero -> espero, dcile -> decile, nterado -> enterado, vrdad -> verdad, comprart, comprarte)\n" }, { "alpha_fraction": 0.6364040970802307, "alphanum_fraction": 0.6412994861602783, "avg_line_length": 36.45000076293945, "blob_id": "58ec612540d1b14ec1f2b00c66debccfc4e09699", "content_id": "6b355b97aafecfb1d483adeda519896f1cbed53e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4495, "license_type": "no_license", "max_line_length": 241, "num_lines": 120, "path": "/deflog.py", "repo_name": "ygorg/deflog", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: Santiago Bruno\n# License: GPL v3\n# Web pages: 
http://www.santiagobruno.com.ar/programas.html\n# http://code.google.com/p/deflog/\n\nimport re\nimport sys\nimport argparse\nfrom pylibdeflog.libdeflog import *\n\n\n\"\"\"\nCategorization of the different treatments\n\n Lang independant\n\"deleet\" : l33t 5p34k -> leet speak (no transliteration 2morrow -> twomorrow)\n\"desalternar\" : keep all lowercase or uppercase\n\"desmultiplicar\" : olaaaaa -> ola (no language model to disambiguate gooood -> {good, god}\n\n Spanish\n\"dessmsar\" : using a dictionnary\n\"desestupidizar\" : using a dictionnary\n\"deszezear\" : replace s by z\n\"deskar\" : replace k by c\n\"desporteniar\" : remove s at the end of word ending in 'istes'\n\"fixmissingvowels\" : add vowels where they are missing following spanish language rules\n\"\"\"\n\nordinate_operations = ['deleet', 'desalternar', 'desmultiplicar', 'dessmsar', 'desestupidizar', 'deszezear', 'deskar', 'desporteniar', 'fixmissingvowels']\n\nfunc_of_operation = {\n \"deleet\": deleet,\n \"desalternar\": desalternar,\n \"desmultiplicar\": desmultiplicar,\n \"dessmsar\": lambda x: desms(x, format='plain'),\n \"desestupidizar\": lambda x: desestupidizar(x, format='plain'),\n \"deszezear\": deszezear,\n \"deskar\": desk,\n \"desporteniar\": desporteniar,\n \"fixmissingvowels\": fixmissingvowels\n}\n\nwords_re = re.compile(u\"([\\\\w\\\\d+]+)\", re.UNICODE)\n\ndef translate(origText, opt=None):\n if opt is None:\n opt = dict()\n\n text = dessimbolizar(origText)\n words = words_re.split(text)\n\n print(words)\n\n for operation in ordinate_operations:\n try:\n if opt[operation] is True:\n words = map(func_of_operation[operation], words)\n except KeyError as e:\n ()\n\n return ''.join(words)\n\n\ndef main():\n\n def arguments():\n parser = argparse.ArgumentParser(description='Translator of Spanish Fotolog and SMS language to Spanish. By default all the rules are applied. 
Choose the one yu want to apply using the arguments.')\n\n parser.add_argument('--desmultiplicar', action='store_true', help='Removes letters repetition (holaaaaa -> hola)')\n parser.add_argument('--deszezear', action='store_true', help='Transforms \\'z\\' in \\'s\\' (Deactivated by default because it creates more harm than good)')\n parser.add_argument('--deskar', action='store_true', help='Transforms \\'k\\' in \\'q\\' (ki -> qui)')\n parser.add_argument('--dessmsar', action='store_true', help='Replace SMS abbreviations (xq -> por que, dsp -> después)')\n parser.add_argument('--desestupidizar', action='store_true', help='(toi -> estoy, i -> y, lemdo -> lindo)')\n parser.add_argument('--desalternar', action='store_true', help='Convert mixed lowercase uppercase words to lowercase and keeps uppercased word (Letra DE UnA CaNcIoN -> letra DE una cancion)')\n parser.add_argument('--desporteniar', action='store_true', help='Removes finals \\'s\\'s in words ending in \\'istes\\' (lo vistes y me dijistes -> lo viste y me dijiste)')\n parser.add_argument('--deleet', action='store_true', help='Convert l33t 5p34k to standard speak (3s7o e5 un 73x70 f30 -> esto es un texto feo)')\n parser.add_argument('--missing-vowels', action='store_true', help='Add missing vowels (it doesn\\'t work english words) (stamos -> estamos, spero -> espero, dcile -> decile, nterado -> enterado, vrdad -> verdad, comprart, comprarte)')\n\n parser.add_argument('--lang-independant', action='store_true', help='Shortcut for --deleet --desalternar --desmultiplicar')\n return parser.parse_args()\n\n args = arguments()\n\n default_opt = {\n \"deleet\": True,\n \"desalternar\": True,\n \"desmultiplicar\": True,\n \"dessmsar\": True,\n \"desestupidizar\": True,\n \"deszezear\": args.deszezear,\n \"deskar\": True,\n \"desporteniar\": True,\n \"fixmissingvowels\": True\n }\n\n lang_independant = {\n \"deleet\": True,\n \"desalternar\": True,\n \"desmultiplicar\": True\n }\n\n opt = dict()\n\n for key, value in vars(args).items():\n if value and key == 'lang_independant':\n opt.update(lang_independant)\n elif value:\n opt[key] = True\n\n if not (opt.keys() - {'deszezear'}):\n opt.update(default_opt)\n\n for line in sys.stdin:\n print(translate(line, opt))\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7578431367874146, "alphanum_fraction": 0.7620915174484253, "avg_line_length": 43.34782791137695, "blob_id": "0c323b952a8bfc160d7c4134c8a1d6f8fe39b8a4", "content_id": "c713ee054b7bf6ae2bc186d97f0dc27d51217645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3096, "license_type": "no_license", "max_line_length": 351, "num_lines": 69, "path": "/README_orig.md", "repo_name": "ygorg/deflog", "src_encoding": "UTF-8", "text": "# [Deflog](https://github.com/sbruno/deflog)\n\n## Descripción\n\nEste programa intenta eliminar varias de las características del lenguaje flogger, como repetición de letras, alternación de mayúsculas y minúsculas, abreviaturas de sms, etc. Se presenta en un varias versiones:\n\n### Versiones web\n\n* PHP\n* Javascript\n* Python usando cherrypy\n\n### Versiones desktop\n\n* PyQT4\n* CLI\n* Otres\n\t- Como un plugin para Messenger Plus! Live.\n\t- Módulo python con las funciones utilizadas.\n\n## Capturas de pantalla\n\n### Captura de la versión online:\n\n![Online version](http://bananabruno.googlepages.com/deflog-javascript-screenshot-small.jpg)\n\n### Captura del plugin para Messenger Plus! Live:\n\n![Plugin for Messenger PLus! 
Live](http://bananabruno.googlepages.com/deflog-msnlive-screenshot-small.jpg)\n\n## Descargas\n\nLas descargas recomendadas para las cuatro variantes del programa son las que aparecen en Featured Downloads.\n\nEl plugin puede ser descargado también desde el sitio de Messenger Plus! Live : http://www.msgpluslive.net/scripts/view/404-DeFlog/\n\nPero esa puede no ser la última versión. Para descargar la última versión, hacerlo desde http://code.google.com/p/deflog/downloads/list\n\nO para ver alguna novedad experimental (?), el trunk del repositorio: http://deflog.googlecode.com/svn/trunk/msnlive_plugin/package/deflog.plsc\n\nDISCLAIMER: El plugin solo puede aplicar todos los métodos correctamente a los mensajes salientes. Es una limitación del messenger como se dice en mi página o la página del plugin en el sitio de Messenger Plus! Live\n\n## Probar Online\n\n[Versión en PHP](http://www.santiagobruno.com.ar/php/desfotologuear.php)\n\n[Versión en Javascript](http://www.santiagobruno.com.ar/javascript/desfotologuear.html)\n\n## Descripción de los métodos aplicables al texto\n\n* **Desmultiplicar**: Elimina repeticiones de letras (holaaaaa -> hola)\n\n* **Deszezear**: Transforma zetas en eses (Desactivado por defecto ya que no es nada inteligente, y si el texto está relativamente bien escrito generará más errores de ortografía de los que solucionará)\n\n* **Des-k-ar**: Similar a Deszezear pero para k -> c. Además transforma ki en qui.\n\n* **DesSMSar**: Elimina abreviaturas SMS (xq -> por que, dsp -> después)\n\n* **Desestupidizar**: (toi -> estoy, i -> y, lemdo -> lindo)\n\n* **Desalternar**: Convierte palabras con mezcla de mayúsculas y minúsculas a minúscula (LeTrA dE uNa CaNcIoN -> letra de una cancion)\n\n* **Desporteñar**: Elimina las eses finales en palabras que terminan en istes (lo vistes y me dijistes -> lo viste y me dijiste)\n\n* **Deleet**: Convierte a letra los números que se usan como letra (3s7o e5 un 73x70 f30 -> esto es un texto feo)\n\n* **Fix missing vowels**: Arega vocales omitidas el final de las palabras (va a fallar en palabras en inglés, porque se supone que en español muy pocas palabras finalizan por ejemplo en 't', entonces se asume que se le debe agregar una e) (stamos -> estamos, spero -> espero, dcile -> decile, nterado -> enterado, vrdad -> verdad, comprart, comprarte)\n\nTodos los métodos pueden aplicarse selectivamente\n" } ]
3
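deflog.py above splits the input into word tokens with a regex and pipes each word through an ordered list of normalisation passes taken from pylibdeflog (de-leet, case flattening, letter de-duplication, SMS expansion, and so on, as the README describes). A stand-alone sketch of the two language-independent passes, so the pipeline shape can be run without pylibdeflog — these are simplified re-implementations for illustration, not the library's own functions (the real "desmultiplicar" presumably knows which doubled letters are legitimate; this one collapses every run):

```python
import re

WORD = re.compile(r"(\w+)", re.UNICODE)   # same token pattern as deflog.py


def desmultiplicar(word):
    # Collapse runs of a repeated letter: "holaaaa" -> "hola".
    return re.sub(r"(.)\1+", r"\1", word)


def desalternar(word):
    # Lowercase mixed-case words, keep fully uppercase ones: "CaNcIoN" -> "cancion".
    return word if word.isupper() else word.lower()


def translate(text, operations=(desmultiplicar, desalternar)):
    parts = WORD.split(text)              # capturing split keeps the separators
    for op in operations:
        parts = [op(p) if WORD.fullmatch(p) else p for p in parts]
    return "".join(parts)


print(translate("holaaaa, Letra DE UnA CaNcIoN"))  # -> hola, letra DE una cancion
```

Applying the passes word-by-word, as both this sketch and the original `translate()` do, keeps punctuation and spacing untouched because only the tokens matched by the regex are rewritten.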
JRReynosa/metricadataanalysis
https://github.com/JRReynosa/metricadataanalysis
d979c85c7b946e2deac973c10592d9053de29926
0f97a7f5a5806e7fe9312d4651e37021a86aa949
ea1668c251e0da88349f1fbcb8e1d5fec3c63b59
refs/heads/master
2022-11-20T14:04:40.376734
2020-07-22T15:11:04
2020-07-22T15:11:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4951575696468353, "alphanum_fraction": 0.51637202501297, "avg_line_length": 31.363183975219727, "blob_id": "3c95d99a5b14e67ce7717fb2a0dae5e9e457de8e", "content_id": "3848e561abcbd03ecdd3359820f0d133712712fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6505, "license_type": "no_license", "max_line_length": 114, "num_lines": 201, "path": "/modules/helper_methods.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n\ndef determine_outcome(row_type, row_subtype, rowahead_type, rowahead_subtype):\n lostball = rowahead_type == \"BALL LOST\" and rowahead_subtype != np.nan and \\\n not (\"FORCED\" or \"THEFT\" or \"CLEARANCE\" or \"END HALF\") in str(rowahead_subtype)\n outcome = {\n \"PASS\": 1 if not lostball else 0,\n \"SHOT\": 1 if \"GOAL\" in str(row_subtype) else 0,\n }\n\n return outcome.get(row_type, None)\n\n\ndef get_events_dataframe(data_location):\n eventsdf = pd.read_csv(data_location, error_bad_lines=False)\n eventsdf.columns = ['team', 'type', 'subtype', 'period', 'start_frame', 'start_time', 'end_frame',\n 'end_time', 'from_player', 'to_player', 'start_x', 'start_y', 'end_x', 'end_y']\n\n all_events = []\n field_dimen = (106., 68.)\n row_iterator = eventsdf.iterrows()\n _, row = next(row_iterator) # Get first row\n for index, rowahead in row_iterator:\n attributes = {\n \"team\": row.team,\n \"period\": row.period,\n \"type\": row.type,\n \"subtype\": row.subtype,\n \"outcome\": determine_outcome(row.type, row.subtype, rowahead.type, rowahead.subtype),\n \"from_player\": row.from_player,\n \"to_player\": row.to_player,\n\n \"start_frame\": row.start_frame,\n \"end_frame\": row.end_frame,\n\n \"start_time\": row.start_time,\n \"end_time\": row.end_time,\n\n \"start_x\": (row.start_x - .5) * 106., # Change field dimensions to 106x68 meters\n \"start_y\": (row.start_y - .5) * 68.,\n \"end_x\": (row.end_x - .5) * 106.,\n \"end_y\": (row.end_y - .5) * 68.,\n }\n all_events.append(attributes)\n row = rowahead\n all_eventsdf = pd.DataFrame(all_events)\n\n return all_eventsdf\n\n\ndef get_tracking_dataframe(data_location):\n trackingdf = pd.read_csv(data_location, error_bad_lines=False, dtype=str)\n trackingdf = trackingdf.drop([0, 1]).reset_index(drop=True)\n trackingdf.columns = [\"period\", \"frame\", \"time\", \"player11x\", \"player11y\", \"player1x\", \"player1y\", \"player2x\",\n \"player2y\",\n \"player3x\", \"player3y\", \"player4x\", \"player4y\", \"player5x\", \"player5y\", \"player6x\",\n \"player6y\",\n \"player7x\", \"player7y\", \"player8x\", \"player8y\", \"player9x\", \"player9y\", \"player10x\",\n \"player10y\",\n \"player12x\", \"player12y\", \"player13x\", \"player13y\", \"player14x\", \"player14y\", \"ballx\",\n \"bally\"]\n return trackingdf\n\n\ndef get_all_action(event_dataframe, action):\n all_actions = []\n\n for index, row in event_dataframe.iterrows():\n if row.type == action:\n attributes = {\n \"team\": row.team,\n \"type\": row.type,\n \"subtype\": row.subtype,\n \"outcome\": row.outcome,\n \"from_player\": row.from_player,\n \"to_player\": row.to_player,\n\n \"start_frame\": row.start_frame,\n \"end_frame\": row.end_frame,\n\n \"start_time\": row.start_time,\n \"end_time\": row.end_time,\n\n \"start_x\": row.start_x,\n \"start_y\": row.start_y,\n \"end_x\": row.end_x,\n \"end_y\": row.end_y,\n }\n all_actions.append(attributes)\n\n actiondf = pd.DataFrame(all_actions)\n return actiondf\n\n\ndef 
action_exception():\n raise Exception(\"Invalid Action\")\n\n\ndef get_seperate_action(event_dataframe, action):\n action_switch = {\n \"PASS\": {\n \"home_passes_1\": [],\n \"away_passes_1\": [],\n \"home_passes_2\": [],\n \"away_passes_2\": []\n },\n \"SHOT\": {\n \"home_shots_1\": [],\n \"away_shots_1\": [],\n \"home_shots_2\": [],\n \"away_shots_2\": []\n }\n }\n seperate_actions = action_switch.get(action, lambda: action_exception())\n\n for index, row in event_dataframe.iterrows():\n if row.type == action:\n attributes = {\n \"team\": row.team,\n \"period\": row.period,\n \"type\": row.type,\n \"subtype\": row.subtype,\n \"outcome\": row.outcome,\n \"from_player\": row.from_player,\n \"to_player\": row.to_player,\n\n \"start_frame\": row.start_frame,\n \"end_frame\": row.end_frame,\n\n \"start_time\": row.start_time,\n \"end_time\": row.end_time,\n\n \"start_x\": row.start_x,\n \"start_y\": row.start_y,\n \"end_x\": row.end_x,\n \"end_y\": row.end_y,\n }\n assign_passes(seperate_actions, attributes)\n\n for key, value in seperate_actions.items():\n # noinspection PyTypeChecker\n seperate_actions[key] = pd.DataFrame(value)\n\n return seperate_actions\n\n\ndef distance_to_goal(shot_loc):\n if shot_loc[0] > 0:\n goal_loc = np.array([53., 0.])\n else:\n goal_loc = np.array([-53., 0.])\n\n return np.sqrt(np.sum((shot_loc - goal_loc) ** 2))\n\n\ndef goal_angle(shot_loc):\n if shot_loc[0] > 0:\n p0 = np.array((53., 4.)) # Left Post\n p1 = np.array(shot_loc, dtype=np.float)\n p2 = np.array((53., -4.)) # Right Post\n\n v0 = p0 - p1\n v1 = p2 - p1\n else:\n p0 = np.array((-53., -4.)) # Left Post\n p1 = np.array(shot_loc, dtype=np.float)\n p2 = np.array((-53., 4.)) # Right Post\n\n v0 = p0 - p1\n v1 = p2 - p1\n\n angle = np.abs(np.math.atan2(np.linalg.det([v0, v1]), np.dot(v0, v1)))\n\n return angle\n\n\ndef determine_starters(dataframe):\n dataframe = dataframe.iloc[:1, 3:31]\n players = []\n i = 0\n for col in dataframe:\n if (dataframe[col][0] is not np.nan) and i % 2 != 0:\n player = col[:len(col) - 1]\n players.append(player)\n i += 1\n return players\n\n\ndef assign_passes(match_dict, pass_attributes):\n if pass_attributes[\"team\"] == \"Home\":\n if pass_attributes[\"period\"] == 1:\n match_dict[\"home_passes_1\"].append(pass_attributes)\n else:\n match_dict[\"home_passes_2\"].append(pass_attributes)\n else:\n if pass_attributes[\"period\"] == 1:\n match_dict[\"away_passes_1\"].append(pass_attributes)\n else:\n match_dict[\"away_passes_2\"].append(pass_attributes)\n" }, { "alpha_fraction": 0.7954971790313721, "alphanum_fraction": 0.7954971790313721, "avg_line_length": 37.07143020629883, "blob_id": "d1a635a8ce69d0cf50624f025aa2b0b63cf07ecf", "content_id": "bd1fe8aaf75dfbcbec24dd4071e53a4555dea178", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 533, "license_type": "no_license", "max_line_length": 101, "num_lines": 14, "path": "/modules/data_visualization.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "from soccerutils.pitch import Pitch\nimport numpy as np\nimport pandas as pd\nimport modules.helper_methods as helper\nimport matplotlib as plt\n\ntracking_path = 'C:\\\\Users\\\\reynosaj\\\\PycharmProjects\\\\metrica_data_analysis\\\\data\\\\TrackingData.csv'\ntrackingdf = helper.get_tracking_data(tracking_path)\n\nevents_path = 'C:\\\\Users\\\\reynosaj\\\\PycharmProjects\\\\metrica_data_analysis\\\\data\\\\EventsData.csv'\neventdf = helper.get_events_data(events_path)\n\nstarters = 
helper.determine_starters(trackingdf)\neventsdf = helper.get_all_events(eventdf)\n" }, { "alpha_fraction": 0.6128494143486023, "alphanum_fraction": 0.6357947587966919, "avg_line_length": 35.318180084228516, "blob_id": "f7d819e8842e775101b720bd900e1d94efae8d25", "content_id": "f7d91152bc953fa8ff359c195791fd1a97f8c43e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2397, "license_type": "no_license", "max_line_length": 108, "num_lines": 66, "path": "/modules/clustering.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import matplotlib.pylab as plt\nimport matplotlib.patches as mpatches\nfrom sklearn.cluster import KMeans\nfrom soccerutils.pitch import Pitch\nimport modules.helper_methods as helper\n\nurl = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \\\n 'Sample_Game_1_RawEventsData.csv'\n\neventdf = helper.get_events_dataframe(url)\n\npassdf_dict = helper.get_seperate_action(eventdf, action=\"PASS\")\n\n\ndef make_model(homedf, awaydf):\n homemodel = KMeans(n_clusters=30)\n awaymodel = KMeans(n_clusters=30)\n\n homefeatures = homedf[['start_x', 'start_y', 'end_x', 'end_y']]\n homefit = homemodel.fit(homefeatures)\n\n awayfeatures = awaydf[['start_x', 'start_y', 'end_x', 'end_y']]\n awayfit = awaymodel.fit(awayfeatures)\n\n homedf[\"cluster\"] = homemodel.predict(homefeatures)\n awaydf[\"cluster\"] = awaymodel.predict(awayfeatures)\n\n return homefit, awayfit\n\n\ndef plot_arrows(model_fits, axis1, axis2):\n for period in range(2): # Two periods\n for team in range(2): # Two teams\n for i, (start_x, start_y, end_x, end_y) in enumerate(model_fits[period][team].cluster_centers_):\n axis = axis1 if period == 0 else axis2\n axis.arrow(start_x, start_y, end_x - start_x, end_y - start_y,\n head_width=1,\n head_length=1,\n color='blue' if team == 0 else 'red',\n alpha=0.5,\n length_includes_head=True)\n\n # ax1.text((start_x + end_x) / 2, (start_y + end_y) / 2, str(i + 1))\n\n\nmatch_fits = [make_model(passdf_dict[\"home_passes_1\"], passdf_dict[\"away_passes_1\"]),\n make_model(passdf_dict[\"home_passes_2\"], passdf_dict[\"away_passes_2\"])]\n# match_fits = [[period1], [period2]]; period1 = [homefit1, awayfit1]; period2 = [homefit2, awayfit2]\n\nfig, (ax1, ax2) = plt.subplots(2, sharex=\"all\", sharey=\"all\", figsize=(10, 8))\nplot_arrows(match_fits, ax1, ax2)\n\n# Plot properties\nred_patch = mpatches.Patch(color='red', label='Away Team')\nblue_patch = mpatches.Patch(color='blue', label='Home Team')\nfig.legend(handles=[red_patch, blue_patch])\n\nax1.set_title(\"First Half\")\nax2.set_title(\"Second Half\")\nplt.xlim(-53, 53)\nplt.ylim(-34, 34)\n# fig.savefig('passing.png', dpi=100)\n\nplt.show()\n\n# Maybe work on pass difficulty making use of pass_distance and pass_angle?\n" }, { "alpha_fraction": 0.7526502013206482, "alphanum_fraction": 0.7597172856330872, "avg_line_length": 30.22222137451172, "blob_id": "ef9ee4044b877d718fe2d597ba3def47876ef771", "content_id": "2e09d82395747b686747c4d38f8bdb7e950953ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 97, "num_lines": 9, "path": "/modules/data_extraction_and_transformation.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import modules.helper_methods as helper\n\nurl = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' 
\\\n 'Sample_Game_1_RawEventsData.csv'\n\neventsdf = helper.get_events_dataframe(url)\nshotsdf = helper.get_all_shots(eventsdf)\n\nprint(shotsdf)\n\n\n" }, { "alpha_fraction": 0.7077550888061523, "alphanum_fraction": 0.718367338180542, "avg_line_length": 34, "blob_id": "72e1c8ab594050dd11615d146db474062606fd48", "content_id": "ec6681fc7d64e3b5be763795290e2d41f96f1825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1225, "license_type": "no_license", "max_line_length": 121, "num_lines": 35, "path": "/modules/logistic_regression.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom scipy.interpolate import make_interp_spline, BSpline\nimport modules.helper_methods as helper\nfrom sklearn.linear_model import LogisticRegression\nimport numpy as np\n\nurl = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \\\n 'Sample_Game_1_RawEventsData.csv'\n\neventsdf = helper.get_events_dataframe(url)\n\n\nall_shotsdf = helper.get_all_action(eventsdf, action=\"SHOT\")\n\nall_shotsdf['distance_to_goal'] = all_shotsdf.apply(lambda q: helper.distance_to_goal(q[['start_x', 'start_y']]), axis=1)\nall_shotsdf['goal_angle'] = all_shotsdf.apply(lambda q: helper.goal_angle(q[['start_x', 'start_y']]), axis=1)\nall_shotsdf['head'] = all_shotsdf.apply(lambda q: 1 if (\"HEAD\" in q[\"subtype\"]) else 0, axis=1)\n\nmodel = LogisticRegression()\n\nfeatures = all_shotsdf[['distance_to_goal', 'goal_angle', 'head']]\nlabels = all_shotsdf['outcome']\n\nfit = model.fit(features, labels)\n\npredictions = model.predict_proba(features)[:, 1]\n\nxnew = np.linspace(0, len(predictions), 300)\nspl = make_interp_spline(range(len(predictions)), sorted(predictions), k=3) # type: BSpline\npower_smooth = spl(xnew)\nplt.plot(xnew, power_smooth)\n\nplt.show()\n\nprint(\"----------------\")\n" }, { "alpha_fraction": 0.7289719581604004, "alphanum_fraction": 0.7383177280426025, "avg_line_length": 24.176469802856445, "blob_id": "263c20e33c7713b2dc3cdef157d20ec80ca96dc5", "content_id": "07638a42a0476db7282baffcb625fafca1f51dd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 856, "license_type": "no_license", "max_line_length": 97, "num_lines": 34, "path": "/modules/database_population_and_querying.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import modules.helper_methods as helper\nfrom sqlalchemy import create_engine\nimport pandas as pd\n\nurl = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \\\n 'Sample_Game_1_RawEventsData.csv'\neventsdf = helper.get_events_dataframe(url)\n\nengine = create_engine('sqlite://')\neventsdf.to_sql('events', engine)\n\ntop_passers = \"\"\"\nselect from_player as player , count(*) as passes\nfrom events\nwhere outcome=1\nand type = \"PASS\"\ngroup by from_player\norder by passes desc\n\"\"\"\n\nprint(pd.read_sql(top_passers, engine).head(10))\n\n# This was supposed to be xG but I did not have enough data to make\n# a solid calculation\ntop_shots = \"\"\"\nselect from_player as player, count(*) as shots\nfrom events\nwhere outcome=1\nand type = \"SHOT\"\ngroup by from_player\norder by shots desc\n\"\"\"\n\nprint(pd.read_sql(top_shots, engine).head(10))\n" }, { "alpha_fraction": 0.6498980522155762, "alphanum_fraction": 0.66213458776474, "avg_line_length": 33.20930099487305, "blob_id": 
"b476f307402c22cbb8c206d28ae56f35e1a4cb7e", "content_id": "741cfa42fdc38899523333b24cd6358203c78d0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1471, "license_type": "no_license", "max_line_length": 97, "num_lines": 43, "path": "/modules/linear_regression.py", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nimport modules.helper_methods as helper\n\nurl = 'https://raw.githubusercontent.com/metrica-sports/sample-data/master/data/Sample_Game_1/' \\\n 'Sample_Game_1_RawEventsData.csv'\n\neventsdf = helper.get_events_dataframe(url)\n\npassesdf = helper.get_all_action(eventsdf, action=\"PASS\")\ntotal_passesdf = passesdf.groupby('from_player')['outcome'].count()\npass_accuracydf = passesdf.groupby('from_player')['outcome'].mean() * 100\n\nfig, ax = plt.subplots()\n\nscatter = ax.scatter(total_passesdf, pass_accuracydf)\nax.set_xlabel(\"Total Passes\")\nax.set_ylabel(\"Pass Completion\")\nplt.yticks(np.arange(0, 110, 10))\n\nfor player, total in total_passesdf.items():\n x = total\n y = pass_accuracydf[player]\n plt.annotate(player,\n (x, y),\n textcoords=\"offset points\", # how to position the text\n xytext=(-5, 10), # distance from text to points (x,y)\n ha='center',\n arrowprops=dict(facecolor='black', arrowstyle=\"-\")\n )\n # t = ax.text(x, y, player, fontsize=8)\n\nmodel = LinearRegression()\nfit = model.fit([[x] for x in total_passesdf], pass_accuracydf)\nprint(\"Coefficients: {}\".format(fit.coef_))\nprint(\"Intercept: {}\".format(fit.intercept_))\n\nxfit = [0, 90] # This is the x-axis range of the chart\nyfit = model.predict([[x] for x in xfit])\n\nplt.plot(xfit, yfit, 'r')\nplt.show()\n" }, { "alpha_fraction": 0.6903765797615051, "alphanum_fraction": 0.6903765797615051, "avg_line_length": 23, "blob_id": "2c7f91a530fc333cce3ca506fc222187aa29b31b", "content_id": "eb13c92e9740e45aa0abfa78d23b164f15567193", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 239, "license_type": "no_license", "max_line_length": 86, "num_lines": 10, "path": "/README.md", "repo_name": "JRReynosa/metricadataanalysis", "src_encoding": "UTF-8", "text": "===========\nMetrica Data Analysis\n===========\n\nPurpose\n=========\n\nLearn some sports analytics. Followed tutorials from David Pleuler's Analytics\nHandbook, however, rather than use the Statsbomb data he used, I made use of Metrica's\ntracking data." } ]
8
IvanLukianenko/Discrete_Optimization
https://github.com/IvanLukianenko/Discrete_Optimization
a2184af7ea6e6e32034c553894d795684e7e72cd
a08d6c483a84a548c5d3d6427255280c553183a8
b5ce8c4de2c64e041060c405e9db3c5e88fe3603
refs/heads/main
2023-04-26T10:49:47.454037
2021-05-20T13:48:26
2021-05-20T13:48:26
339,468,058
1
2
null
null
null
null
null
[ { "alpha_fraction": 0.48563483357429504, "alphanum_fraction": 0.5104263424873352, "avg_line_length": 28.16891860961914, "blob_id": "d5f7dffbf7a0b8b8f35a085860a0354d59ab0a7d", "content_id": "2c368c9795b0c26cc4e328c44b5e2f952507b1b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4442, "license_type": "no_license", "max_line_length": 150, "num_lines": 148, "path": "/knapsack/solver.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom collections import namedtuple\nItem = namedtuple(\"Item\", ['index', 'value', 'weight'])\nItem_2 = namedtuple(\"Item\", ['index', 'value', 'weight', 'value_on_weight'])\nglobal_max = -1\n\ndef BranchAndBound(o_v, f_s, actl_v, tkn):\n \"\"\"\n Метод ветвей и границ для рюкзака.\n \"\"\"\n global global_max\n if (f_s < 0):\n return -1, []\n if (len(tkn) == n) and (actl_v < global_max):\n return -1, []\n else:\n if (len(tkn) == n):\n return actl_v, tkn\n if (o_v < global_max):\n return -1, tkn\n if (o_v < 0):\n return -1, tkn\n a, b = BranchAndBound(o_v, f_s - items_2_sorted[len(tkn)].weight, actl_v + items_2_sorted[len(tkn)].value, tkn + [items_2_sorted[len(tkn)].index])\n a1, b1 = BranchAndBound(o_v - items_2_sorted[len(tkn)].value, f_s, actl_v, tkn + [items_2_sorted[len(tkn)].index])\n\n if a > a1:\n global_max, tkn = a, b\n else:\n global_max, tkn = a1, b1\n return global_max, tkn\n\ndef Traceback(arr, K, items_):\n \"\"\"\n По матрице вычислим решение.\n \"\"\"\n items = []\n j = len(items_)\n i = K\n while (arr[i][j] != 0):\n if(arr[i][j] == arr[i][j-1]):\n j-=1\n \n else:\n items.append(items_[j-1].index)\n j-=1\n i-=items_[j].weight\n #print(\"here\")\n \n return items\n\ndef create_table(items, K):\n \"\"\"\n Создаем \"ту самую матрицу\" для динамического программирования рюкзака.\n \"\"\"\n a = [[0] * (len(items)+1) for i in range(K+1)]\n #print (w)\n for i in range(K+1):\n for j in range(len(items)+1):\n if j == 0:\n a[i][j] = 0\n else:\n if(items[j-1].weight <= i):\n a[i][j] = max(a[i][j-1], items[j-1].value + a[i - items[j-1].weight][j-1])\n else:\n a[i][j] = a[i][j-1]\n return a\n\n\ndef greedy_algo(items_2, K):\n \"\"\"\n Жадный алгоритм.\n \"\"\"\n actual_weight = 0\n actual_value = 0\n taken = []\n items_2 = sorted(items_2, key = lambda item: item.value_on_weight, reverse = True) \n i = 0\n while(actual_weight <= K):\n actual_value += items_2[i].value\n actual_weight += items_2[i].weight\n taken.append(items_2[i].index)\n i += 1\n\n del taken[-1]\n return taken, actual_value - items_2[i-1].value\n \n\ndef solve_it(input_data):\n\n lines = input_data.split('\\n')\n\n firstLine = lines[0].split()\n item_count = int(firstLine[0])\n capacity = int(firstLine[1])\n\n items = []\n items_2 = []\n for i in range(1, item_count+1):\n line = lines[i]\n parts = line.split()\n items.append(Item(i-1, int(parts[0]), int(parts[1])))\n items_2.append(Item_2(i-1, int(parts[0]), int(parts[1]), (float(parts[0])/int(parts[1])) ) ) \n \n taken = [0]*len(items)\n\n K = capacity\n items_2_sorted = sorted(items_2, key = lambda item: item.value_on_weight, reverse = True) \n \n if item_count == 400:\n n = 30\n table = create_table(items_2_sorted[:n],K)\n value = table[-1][-1]\n taken_1 = Traceback(table, K, items_2_sorted[:n])\n for i in taken_1:\n taken[i] = 1\n else:\n if len(items) == 10000:\n n = 30\n table = create_table(items_2_sorted[:n],K)\n value = table[-1][-1]\n taken_1 = Traceback(table, K, items_2_sorted[:n])\n 
for i in taken_1:\n taken[i] = 1\n else:\n table = create_table(items_2_sorted,K)\n value = table[-1][-1]\n taken_1 = Traceback(table, K, items_2_sorted)\n for i in taken_1:\n taken[i] = 1\n \n \n # prepare the solution in the specified output format\n output_data = str(value) + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, taken))\n return output_data\n\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n file_location = sys.argv[1].strip()\n with open(file_location, 'r') as input_data_file:\n input_data = input_data_file.read()\n print(solve_it(input_data))\n else:\n print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)')" }, { "alpha_fraction": 0.5560675859451294, "alphanum_fraction": 0.578341007232666, "avg_line_length": 30.0238094329834, "blob_id": "1b01d7b4dbf34d01cd663b94fa82b89e4cfa9982", "content_id": "0ef31a967ecacd7ea5b2c37d55bc49822a1cabbd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 123, "num_lines": 42, "path": "/vrp/opt2.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "import math\n\ndef tour_length(solution, points):\n obj = length(points[solution[-1].index], points[solution[0].index])\n for index in range(0, len(solution)-1):\n obj += length(points[solution[index].index], points[solution[index+1].index])\n return obj\n\ndef length(point1, point2):\n return math.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2)\n\ndef reverse_segment_if_2better_(tour, i, j, points):\n \"\"\"Чекаем, если уменьшится длина\"\"\"\n #[...A-B...C-D...]\n A, B, C, D = points[tour[i-1].index], points[tour[i].index], points[tour[j-1].index], points[tour[j % len(tour)].index]\n d0 = length(A, B) + length(C, D)\n d1 = length(A, C) + length(B, D)\n\n delta = d1 - d0\n if delta >= 0: \n return 0\n\n tour[i:j] = reversed(tour[i:j])\n return delta\n\n\ndef two_opt(tour, points):\n \"\"\"Перебираем все пары и запускаем функцию выше\"\"\"\n while True:\n delta = 0\n for (a, b) in all_2segments_(len(tour)):\n delta += reverse_segment_if_2better_(tour, a, b, points)\n #print(tour_length(tour, points, len(points)))\n if delta >= 0:\n break \n return tour\n\ndef all_2segments_(n: int):\n \"\"\"комбинации всех пар ребер\"\"\"\n return ((i, j)\n for i in range(n)\n for j in range(i + 2, n + (i>0)))" }, { "alpha_fraction": 0.46069180965423584, "alphanum_fraction": 0.4795597493648529, "avg_line_length": 24.453332901000977, "blob_id": "3c089e56bf0f4345f0ca86ace2c4db4a0e9a09e6", "content_id": "c6a30763308f889acba3d6098720efcac1f6b66b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1908, "license_type": "no_license", "max_line_length": 73, "num_lines": 75, "path": "/variants/dinamic_prog.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "def Traceback(arr, K, w):\n items = []\n j = len(w)\n i = K\n while (arr[i][j] != 0):\n if(arr[i][j] == arr[i][j-1]):\n j-=1\n \n else:\n items.append(j)\n j-=1\n i-=w[j]\n #print(\"here\")\n \n return items\n\ndef create_table(v, w, K):\n a = [[0] * (len(v)+1) for i in range(K+1)]\n #print (w)\n for i in range(K+1):\n for j in range(len(v)+1):\n if j == 0:\n a[i][j] = 0\n else:\n if(w[j-1] <= i):\n a[i][j] = max(a[i][j-1], v[j-1] + a[i - w[j-1]][j-1])\n else:\n a[i][j] = a[i][j-1]\n return a\n\ndef solve_it(input_data):\n # 
Modify this code to run your optimization algorithm\n\n # parse the input\n lines = input_data.split('\\n')\n\n firstLine = lines[0].split()\n item_count = int(firstLine[0])\n capacity = int(firstLine[1])\n\n items = []\n\n for i in range(1, item_count+1):\n line = lines[i]\n parts = line.split()\n items.append(Item(i-1, int(parts[0]), int(parts[1])))\n\n # a trivial algorithm for filling the knapsack\n # it takes items in-order until the knapsack is full\n \n #value = 0\n #weight = 0\n taken = [0]*len(items)\n\n v = [item.value for item in items]\n w = [item.weight for item in items]\n K = capacity\n #print(w)\n\n table = create_table(v,w,K)\n value = table[-1][-1]\n taken_1 = Traceback(table, K, w)\n for i in taken_1:\n taken[i-1] = 1\n #print(table)\n #for item in items:\n # if weight + item.weight <= capacity:\n # taken[item.index] = 1\n # value += item.value\n # weight += item.weight\n \n # prepare the solution in the specified output format\n output_data = str(value) + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, taken))\n return output_data" }, { "alpha_fraction": 0.47192659974098206, "alphanum_fraction": 0.48697248101234436, "avg_line_length": 27.67368507385254, "blob_id": "27913cb0993ef25fc00a1da72b53cdd0c74ca26c", "content_id": "b4b00f007b2663f8d5d32a6931ab5a094f602463", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2758, "license_type": "no_license", "max_line_length": 132, "num_lines": 95, "path": "/coloring/solver.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#---------------------------------------------------------------------------------------------------------------------#\n# Variables:\n# color[node_count] - decision variable\n#\n# Constraints:\n# (General constraint) \n# 1) for edge in edges:\n# color[edge[0]] != color[edge[1]]\n#\n# (Symmetry breakings constraint)\n# 2) for node1 in nodes:\n# for node2 in nodes:\n# if node1 < node2:\n# color[node1] < color[node2]\n#\n#----------------------------------------------------------------------------------------------------------------------#\nfrom ortools.sat.python import cp_model\n\ndef find_max_deg(edges, node_count):\n \"\"\"\n Вычислим максимальную степень в графе.\n \"\"\"\n max_deg = 0\n\n for i in range(0, node_count):\n k = 0\n for edge in edges:\n if i == edge[1] or i == edge[0]:\n k += 1\n if max_deg < k:\n max_deg = k\n \n return max_deg\n\ndef solve_it(input_data):\n\n lines = input_data.split('\\n')\n\n first_line = lines[0].split()\n node_count = int(first_line[0])\n edge_count = int(first_line[1])\n\n edges = []\n for i in range(1, edge_count + 1):\n line = lines[i]\n parts = line.split()\n edges.append((int(parts[0]), int(parts[1])))\n \n #max deg is the minimum of possible number of colors\n max_deg = find_max_deg(edges, node_count)\n \n i = 0\n status = 0\n while (status != cp_model.OPTIMAL):\n model = cp_model.CpModel()\n color = []\n for node in range(node_count):\n color.append(model.NewIntVar(0, i+int(node_count**2/(float(node_count**2 - 2*edge_count))), \"{}\".format(node)))\n \n for edge in edges:\n model.Add(color[edge[0]] != color[edge[1]])\n \n solver = cp_model.CpSolver()\n \n solver.parameters.max_time_in_seconds = 10.0\n\n status = solver.Solve(model)\n i += 1\n solution = []\n \n if status == cp_model.OPTIMAL:\n for c in color:\n solution.append(solver.Value(c))\n\n count = len(set(solution))\n \n output_data = str(count) + ' ' + str(0) 
+ '\\n'\n output_data += ' '.join(map(str, solution))\n\n return output_data\n\n\nimport sys\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n file_location = sys.argv[1].strip()\n with open(file_location, 'r') as input_data_file:\n input_data = input_data_file.read()\n print(solve_it(input_data))\n else:\n print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/gc_4_1)')\n\n" }, { "alpha_fraction": 0.5717763304710388, "alphanum_fraction": 0.5801005363464355, "avg_line_length": 31.073047637939453, "blob_id": "4ef89c4377ab9b04c261dc5b3884244c8c01aa78", "content_id": "f15112e1d77bc724f5eba0c04e68290690e66661", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13379, "license_type": "no_license", "max_line_length": 143, "num_lines": 397, "path": "/vrp/solver.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#-------------------------------------------------------------------------------------------------------------#\n#\n# Model:\n# minimize: \n# Sum by i \\in V [dist(0, Ti(0)) + sum by <j, k> (dist(j,k)) + dist(Ti(-1),0)]\n# \n# subject to:\n# 1) sum by j \\in Ti (dj) <= c \n# 2) sum by i \\in V (j \\in Ti) = 1 (j \\in N\\{0})\n#\n#\n# Словами:\n# Нужно минимизировать суммарное растояние всех циклов, при этом нужно, чтобы каждый покупатель\n# был обслужен одним грузовиком, и вместимость грузовика хватало на всех.\n#\n#\n# Дописать локальный поиск так, чтобы удовлетворялись ограничения.\n#-------------------------------------------------------------------------------------------------------------#\nimport math\nfrom collections import namedtuple\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nimport opt3\nimport opt2\nimport numpy as np\nCustomer = namedtuple(\"Customer\", ['index', 'demand', 'x', 'y'])\n\ndef length(customer1, customer2):\n return math.sqrt((customer1.x - customer2.x)**2 + (customer1.y - customer2.y)**2)\n\ndef tspOnSteroids(tour, customers, vehicle_count):\n \"\"\"\n Решение с помощью идеи TSP on steroids.\n \"\"\"\n depot = customers[0]\n for _ in range(vehicle_count-1):\n customers.append(depot)\n tour = opt2.two_opt(tour, customers)\n\n return tour\n \n\ndef findLZI(tour):\n \"\"\"\n Найти индекс последнего нуля.\n \"\"\"\n for i in range(len(tour)-1, 0, -1):\n if tour[i].index == 0:\n return i\n\ndef findFZI(tour):\n \"\"\"\n Найти индекс первого нуля.\n \"\"\"\n for i in range(0, len(tour)):\n if tour[i].index == 0:\n return i \n\ndef trivalSolution(vehicle_count, vehicle_capacity, depot, customers, customer_count):\n \"\"\"\n Пробное решение.\n \"\"\"\n vehicle_tours = []\n customer_count = len(customers)\n remaining_customers = set(customers)\n remaining_customers.remove(depot)\n\n for v in range(0, vehicle_count):\n # print \"Start Vehicle: \",v\n vehicle_tours.append([])\n capacity_remaining = vehicle_capacity\n while sum([capacity_remaining >= customer.demand for customer in remaining_customers]) > 0:\n used = set()\n order = sorted(remaining_customers, key=lambda customer: -customer.demand*customer_count + customer.index)\n \n for customer in order:\n if capacity_remaining >= customer.demand:\n capacity_remaining -= customer.demand\n vehicle_tours[v].append(customer)\n # print ' add', ci, capacity_remaining\n used.add(customer)\n remaining_customers -= used\n return vehicle_tours\n\ndef 
calculateObj(vehicle_count, vehicle_tours, depot):\n \"\"\"\n Подсчет objective function.\n \"\"\"\n obj = 0\n for v in range(0, vehicle_count):\n vehicle_tour = vehicle_tours[v]\n if len(vehicle_tour) > 0:\n obj += length(depot,vehicle_tour[0])\n for i in range(0, len(vehicle_tour)-1):\n obj += length(vehicle_tour[i],vehicle_tour[i+1])\n obj += length(vehicle_tour[-1],depot)\n return obj\n\ndef fromTourToVehicleTours(tour, vehicle_count, n):\n \"\"\"\n Парсинг решения в виде одного тура в пути грузовиков.\n \"\"\"\n vehicle_tours = [[] for i in range(vehicle_count)]\n \n if tour[0] == 0:\n num_tour = n-1\n for i in range(len(tour)):\n if tour[i].index == 0:\n num_tour += 1\n vehicle_tours[num_tour].append(tour[i])\n \n return vehicle_tours\n else:\n num_tour = n\n lastZeroIndex = findLZI(tour)\n vehicle_tours[num_tour] = tour[lastZeroIndex:] + tour[:findFZI(tour)]\n tour = tour[:lastZeroIndex]\n tour = tour[findFZI(tour):]\n for i in range(len(tour)):\n if tour[i].index == 0:\n num_tour += 1\n vehicle_tours[num_tour].append(tour[i])\n \n return vehicle_tours\n\ndef printTour(tour):\n \"\"\"\n Печать тура.\n \"\"\"\n string = \"\"\n for t in tour:\n string = string + \" \" + str(t.index)\n print(string)\n\ndef sumDemand(vehicle_tours):\n \"\"\"\n Подсчет потребностей.\n \"\"\"\n total_demands = []\n for tour in vehicle_tours:\n tot = 0\n for c in tour:\n tot += c.demand\n total_demands.append(tot)\n return total_demands\n\n\n\ndef rotateCustomers(vehicle_tours, vehicle_capacity):\n \"\"\"\n Подвинуть клиентов.\n \"\"\"\n total_demands = sumDemand(vehicle_tours)\n for i in range(len(vehicle_tours)):\n actual_demand = 0\n if total_demands[i] > vehicle_capacity:\n for j in range(len(vehicle_tours[i])):\n actual_demand += vehicle_tours[i][j].demand\n if actual_demand > vehicle_capacity:\n vehicle_tours[(i+1)%len(vehicle_tours)] += vehicle_tours[i][j:]\n vehicle_tours[i] = vehicle_tours[i][:j]\n break\n return vehicle_tours\n\ndef findNearestDemand(tour, delta):\n \"\"\"\n Найти наименьший элемент, который стоит убрать, чтобы выполнить ограничение.\n \"\"\"\n demand = [x.demand - delta for x in tour]\n return demand.index(min(demand))\n\n\n\ndef exchangeCustomers(vehicle_tours, vehicle_capacity, customers):\n \"\"\"\n Поменять клиентов, для улучшения решения.\n \"\"\"\n total_demands = sumDemand(vehicle_tours)\n deltas = [vehicle_capacity - total_demand for total_demand in total_demands]\n for i in range(len(vehicle_tours)):\n if total_demands[i] > vehicle_capacity:\n customer_index = findNearestDemand(vehicle_tours[i], total_demands[i] - vehicle_capacity)\n for j in range(len(deltas)):\n if deltas[j] + vehicle_tours[i][customer_index].demand > 0:\n #temp = vehicle_tours[i][customer_index]\n vehicle_tours[j].append(vehicle_tours[i][customer_index])\n #vehicle_tours[j].append(customers[0])\n del vehicle_tours[i][customer_index]\n #break\n return vehicle_tours\n\ndef createDistanceTable(points):\n \"\"\"\n Создание матрицы расстояний.\n \"\"\"\n a = [[0] * (len(points)) for i in range(len(points))]\n a = np.array(a)\n for i in range(len(points)):\n for j in range(len(points)):\n a[i][j] = length(points[i],points[j])\n return a\n\ndef create_data_model(vehicle_count, vehicle_capacity, customers):\n \"\"\"\n Создание данных для модели.\n \"\"\"\n data = {}\n data['distance_matrix'] = createDistanceTable(customers)\n data['num_vehicles'] = vehicle_count\n data['depot'] = 0\n data['demands'] = [x.demand for x in customers]\n data['vehicle_capacities'] = [vehicle_capacity] * vehicle_count\n return 
data\n\ndef collect_solution(data, manager, routing, solution):\n \"\"\"\n Сборка решения.\n \"\"\"\n total_distance = 0\n total_load = 0\n vehicle_tours = []\n for vehicle_id in range(data['num_vehicles']):\n vehicle_tours.append([])\n index = routing.Start(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n vehicle_tours[-1].append(node_index)\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n \n total_distance += route_distance\n total_load += route_load\n return vehicle_tours\n\ndef solveOrTools(vehicle_count, vehicle_capacity, customers):\n \"\"\"\n Решение с помощью ortools модели.\n \"\"\"\n\n data = create_data_model(vehicle_count, vehicle_capacity, customers)\n\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n routing = pywrapcp.RoutingModel(manager)\n\n\n def distance_callback(from_index, to_index):\n \"\"\"\n Возвращает расстояние между клиентами.\n \"\"\"\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n def demand_callback(from_index):\n \"\"\"\n Возвращает потребность клиента.\n \"\"\"\n from_node = manager.IndexToNode(from_index)\n return data['demands'][from_node]\n\n demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)\n routing.AddDimensionWithVehicleCapacity(\n demand_callback_index,\n 0, \n data['vehicle_capacities'], \n True, \n 'Capacity')\n\n dimension_name = 'Distance'\n routing.AddDimension(\n transit_callback_index,\n 0, \n 3000, \n True, \n dimension_name)\n distance_dimension = routing.GetDimensionOrDie(dimension_name)\n distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n solution = routing.SolveWithParameters(search_parameters)\n\n if solution:\n vehicle_tours = collect_solution(data, manager, routing, solution)\n else:\n print('No solution found !')\n return vehicle_tours\n\ndef solveLikeTspOnSteroids(vehicle_count, vehicle_capacity, depot, customer_count, customers):\n \"\"\"\n Решение как tsp on steroids.\n \"\"\"\n customers_copy = customers.copy()\n\n vehicle_tours = trivalSolution(vehicle_count, vehicle_capacity, depot, customers_copy, customer_count)\n\n tourForTsp = []\n tourForTsp += vehicle_tours[0]\n for i in range(1, vehicle_count):\n tourForTsp += vehicle_tours[i]\n tour = tourForTsp\n k = vehicle_count\n vehicle_tours_main = []\n while k > 0:\n tour = tspOnSteroids(tour, customers, vehicle_count)\n vehicle_tours_main.append([])\n demand = 0\n i = 0\n print(\"Остаток тура:\")\n printTour(tour)\n while demand < vehicle_capacity:\n \n if len(tour) == 0 or i == len(tour):\n break\n vehicle_tours_main[-1].append(tour[i])\n\n demand += tour[i].demand\n \n i += 1\n printTour(vehicle_tours_main[-1]) \n print(demand, vehicle_capacity)\n if demand > vehicle_capacity and k != 1:\n vehicle_tours_main[-1].pop()\n i -= 1\n print(\"Next\")\n printTour(vehicle_tours_main[-1]) \n tour = tour[i:]\n k -= 1\n if len(tour) != 0 and k == 
0:\n vehicle_tours_main[-1] += tour\n\n \n vehicle_tours = vehicle_tours_main\n\ndef solve_it(input_data):\n\n lines = input_data.split('\\n')\n\n parts = lines[0].split()\n customer_count = int(parts[0])\n vehicle_count = int(parts[1])\n vehicle_capacity = int(parts[2])\n \n customers = []\n for i in range(1, customer_count+1):\n line = lines[i]\n parts = line.split()\n customers.append(Customer(i-1, int(parts[0]), float(parts[1]), float(parts[2])))\n\n \n depot = customers[0] \n vehicle_tours = solveOrTools(vehicle_count, vehicle_capacity, customers)\n \n for i in range(len(vehicle_tours)):\n vehicle_tours[i] = vehicle_tours[i][1:]\n assert sum([len(v) for v in vehicle_tours]) == len(customers) - 1\n for i in range(len(vehicle_tours)):\n for j in range(len(vehicle_tours[i])):\n vehicle_tours[i][j] = customers[vehicle_tours[i][j]]\n \n obj = calculateObj(vehicle_count, vehicle_tours, depot)\n \n \n outputData = '%.2f' % obj + ' ' + str(0) + '\\n'\n for v in range(0, vehicle_count):\n outputData += str(depot.index) + ' ' + ' '.join([str(customer.index) for customer in vehicle_tours[v]]) + ' ' + str(depot.index) + '\\n'\n\n return outputData\n\n\nimport sys\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n file_location = sys.argv[1].strip()\n with open(file_location, 'r') as input_data_file:\n input_data = input_data_file.read()\n print(solve_it(input_data))\n else:\n\n print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/vrp_5_4_1)')\n\n" }, { "alpha_fraction": 0.5521382689476013, "alphanum_fraction": 0.5715079307556152, "avg_line_length": 37.82210159301758, "blob_id": "df5b09c03af87ab7fa951d75f5c293c731fd96ee", "content_id": "e467e6a5228875b768c9231b8f1d7e8176411399", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14956, "license_type": "no_license", "max_line_length": 188, "num_lines": 371, "path": "/facility/solver.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#-----------------------------------------------------------------------------------------------------------------------------------#\n# \n# Define the MIP model:\n# \n# Y[W,C] - If j warehouse serves i customer <=> Y[j,i] == 1\n# X[W] - If i warehouse is open <=> X[i] == 1\n#\n# Distance[W,C] - Distance[i, j] shows the distance between i warehouse and j customer\n#\n# Minimize: sum_by_w(facilities[w].setup_cost * X[w]) + sum_all(w,c)(Distance[w, c] * Y[w, c])\n# \n# Subject to:\n# 1) sum_by_c(customers[c].demand * Y[w, c]) <= facilities[w].capacity ( for w in warehouses )\n# 2) Y[w, c] <= X[w] ( for (w, c) in all_combinations(warehouses, customers) )\n# 3) sum_by_w(Y[w, c]) == 1 ( for c in customers )\n#\n#-----------------------------------------------------------------------------------------------------------------------------------#\n#\n# План действий:\n# 1. Составляем матрицу расстояний. Done!\n# 2. Подготовить данные для MIP модели. Done!\n# 3. Локальный поиск для 5-8 тестов. 
Done!\n# \n#-----------------------------------------------------------------------------------------------------------------------------------#\nfrom collections import namedtuple\nimport math\nimport random \nimport numpy as np\nfrom ortools.linear_solver import pywraplp\n\nPoint = namedtuple(\"Point\", ['x', 'y'])\nFacility = namedtuple(\"Facility\", ['index', 'setup_cost', 'capacity', 'location'])\nCustomer = namedtuple(\"Customer\", ['index', 'demand', 'location'])\n\n\ndef countSumDemand(customers):\n \"\"\"\n Посчитать суммарную потребность массива кустомеров.\n \"\"\"\n res = 0\n for c in customers:\n res += c.demand\n return res\n\ndef giveListOfBiggestFacilities(facilities, customers):\n \"\"\"\n Посчитать сколько требуется складов из facilities, чтобы обслужить массив customers\n \"\"\"\n total_demand = countSumDemand(customers)\n sortFacs = sorted(facilities, key = lambda x: x.capacity, reverse=True)\n listToGive = []\n for f in sortFacs:\n listToGive.append(f)\n total_demand -= f.capacity\n if total_demand <= 0:\n break\n return listToGive\n\n\ndef length(point1, point2):\n \"\"\"\n Вычисление евклидового расстояния между точками.\n \"\"\"\n return math.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2)\n\ndef nearestCheapFacility(facilities, customers, alpha, beta = 0, flag1 = 0, flag2 = 0):\n \"\"\"\n Клиента прикрепляем к ближайшему складу, учитывая его открытие и его вместимость\n \"\"\"\n solution = [-1]*len(customers)\n capacities = [f.capacity for f in facilities]\n \n for c in customers:\n min_cost = -1\n best_id = -1\n for f in facilities:\n if capacities[f.index] >= c.demand:\n if (min_cost == -1 or (alpha * length(f.location, c.location) + (1 - alpha) * f.setup_cost - beta * capacities[f.index] < min_cost and f.capacity == capacities[f.index])):\n best_id = f.index \n min_cost = alpha * length(f.location, c.location) + (1-alpha) * f.setup_cost - beta * capacities[f.index]\n elif (min_cost == -1 or (alpha * length(f.location, c.location) - beta * capacities[f.index] < min_cost and f.capacity != capacities[f.index])):\n best_id = f.index \n min_cost = alpha * length(f.location, c.location) - beta * capacities[f.index]\n solution[c.index] = best_id\n capacities[best_id] -= c.demand\n \n used = [0] * len(facilities)\n for facility_index in solution:\n used[facility_index] = 1\n obj = sum([f.setup_cost*used[f.index] for f in facilities])\n for customer in customers:\n obj += length(customer.location, facilities[solution[customer.index]].location)\n if flag1 == 1:\n obj, solution = opt2(facilities, customers, solution, capacities)\n if flag2 == 1:\n obj, solution = opt2Facilities(facilities, customers, solution, capacities)\n\n\n return obj, solution\n\ndef opt2Facilities(facilities, customers, solution, capacities):\n \"\"\"\n сгенерить решение\n проверить каждого customer на уменьшение целевой функции переселить к другому facility\n \"\"\"\n quantity_of_customers = [0] * len(facilities)\n for c in solution:\n quantity_of_customers[c] += 1\n for c, f in enumerate(solution):\n for facility in facilities:\n if f == facility.index:\n continue\n else:\n f1, c1, f2 = facilities[f], customers[c], facility\n f1_c1 = length(f1.location, c1.location)\n f2_c1 = length(f2.location, c1.location)\n d0 = f1_c1 + f1.setup_cost + f2.setup_cost\n if capacities[f2.index] >= c1.demand:\n if quantity_of_customers[f1.index] == 1:\n d1 = f2_c1 + f2.setup_cost\n else:\n d1 = f2_c1 + f1.setup_cost + f2.setup_cost\n else: \n continue\n if quantity_of_customers[f2.index] == 0:\n d0 -= 
f2.setup_cost\n if d1 - d0 < 0:\n solution[c] = f2.index\n capacities[f] += c1.demand\n capacities[f2.index] -= c1.demand\n quantity_of_customers[f1.index] -= 1\n quantity_of_customers[f2.index] += 1\n else:\n continue\n used = [0] * len(facilities)\n for facility_index in solution:\n used[facility_index] = 1\n obj = sum([f.setup_cost*used[f.index] for f in facilities])\n for customer in customers:\n obj += length(customer.location, facilities[solution[customer.index]].location)\n #print(capacities)\n return obj, solution\n \n\ndef opt2(facilities, customers, solution, capacities):\n \"\"\"\n Сгенерив решение, проверить улучшиться ли objective function, если поменять склады у клиентов.\n \"\"\"\n quantity_of_customers = [0] * len(facilities)\n for c in solution:\n quantity_of_customers[c] += 1\n\n for c1, f1 in enumerate(solution):\n for j in range(c1+1, len(solution)):\n f1_, c1_, f2, c2 = facilities[f1], customers[c1], facilities[solution[j]], customers[j]\n f1_c1 = length(f1_.location, c1_.location)\n f2_c2 = length(f2.location, c2.location)\n f2_c1 = length(f2.location, c1_.location)\n f1_c2 = length(f1_.location, c2.location)\n d0 = f1_c1 + f2_c2 + f1_.setup_cost + f2.setup_cost\n d1 = f2_c1 + f1_c2 + f1_.setup_cost + f2.setup_cost\n delta = -d0 + d1\n\n if delta >= 0:\n continue\n else:\n temp = None\n\n if capacities[f1] + c1_.demand >= c2.demand and capacities[f2.index] + c2.demand >= c1_.demand:\n temp = solution[c1]\n solution[c1] = solution[j]\n solution[j] = temp \n capacities[f1] = capacities[f1] + c1_.demand - c2.demand\n capacities[f2.index] = capacities[f2.index] + c2.demand - c1_.demand\n\n used = [0] * len(facilities)\n for facility_index in solution:\n used[facility_index] = 1\n obj = sum([f.setup_cost*used[f.index] for f in facilities])\n for customer in customers:\n obj += length(customer.location, facilities[solution[customer.index]].location)\n #print(capacities)\n return obj, solution\n \n\ndef createData(facilities, customers, facility_count, customer_count):\n \"\"\"\n Обработка данных для MIP модели.\n \"\"\"\n\n data = {}\n\n #Сначала разбираемся с objective function\n data[\"num_vars\"] = facility_count + customer_count * facility_count\n costs = [facility.setup_cost for facility in facilities]\n distances = []\n for i in range(facility_count):\n for j in range(customer_count):\n distances.append(length(facilities[i].location, customers[j].location))\n data[\"obj_coefs\"] = costs + distances\n\n #Далее разбираемся с constraints\n\n #Constraints 1\n #sum_by_c(customers[c].demand * Y[w, c]) <= facilities[w].capacity ( for w in warehouses )\n data[\"constraint_coefs\"] = []\n data[\"bounds\"] = []\n for i, f in enumerate(facilities): \n #cначала добавляем нули\n line = [0] * facility_count + [0] * facility_count * customer_count\n \n for j in range(customer_count):\n line[facility_count + (customer_count) * (i) + j] = customers[j].demand\n data[\"bounds\"].append(f.capacity)\n data[\"constraint_coefs\"].append(line)\n\n #Constraint 2\n #Y[w, c] <= X[w] ( for (w, c) in all_combinations(warehouses, customers) )\n # -X[w] + Y[w, c] <= 0 \n \n for i, f in enumerate(facilities):\n line = [0] * facility_count + [0] * facility_count * customer_count\n line[i] = -1\n k = 0\n \n for l in range(customer_count):\n line_copy = line.copy()\n data[\"constraint_coefs\"].append(line_copy)\n data[\"constraint_coefs\"][-1][facility_count + i*customer_count+l] = 1\n data[\"bounds\"].append(0)\n \n #Constraint 3\n #sum_by_w(Y[w, c]) == 1 ( for c in customers )\n for j, f in 
enumerate(customers):\n line = [0] * facility_count + [0] * facility_count * customer_count\n for k in range(facility_count + j, len(line),customer_count ):\n line[k] = 1\n data[\"constraint_coefs\"].append(line)\n data[\"bounds\"].append(1)\n\n data[\"num_constraints\"] = len(data[\"constraint_coefs\"])\n return data\n\n\ndef unmap(mapping, index):\n \"\"\"\n Обратное отображение.\n \"\"\"\n return mapping.index(index)\n \n\ndef solveWithOrtools(facilities, customers, facility_count, customer_count):\n \"\"\"\n Решение с помощью ortools MIP model.\n \"\"\"\n n = facilities[0].index\n facility_count = len(facilities)\n\n data = createData(facilities, customers, facility_count, customer_count)\n \n solver = pywraplp.Solver.CreateSolver(\"SCIP\")\n x = {}\n for j in range(data['num_vars']):\n x[j] = solver.IntVar(0, 1, 'x[%i]' % j)\n print('Number of variables =', solver.NumVariables())\n\n for i in range(data['num_constraints']):\n if i >= facility_count + facility_count*customer_count:\n constraint = solver.RowConstraint(data['bounds'][i], data['bounds'][i], '')\n else:\n constraint = solver.RowConstraint(-2, data['bounds'][i], '')\n for j in range(data['num_vars']):\n constraint.SetCoefficient(x[j], data['constraint_coefs'][i][j])\n print('Number of constraints =', solver.NumConstraints())\n\n objective = solver.Objective()\n for j in range(data['num_vars']):\n objective.SetCoefficient(x[j], data['obj_coefs'][j])\n objective.SetMinimization()\n\n status = solver.Solve()\n rawSolution = []\n if status == pywraplp.Solver.OPTIMAL:\n print('Objective value =', solver.Objective().Value())\n for j in range(data['num_vars']):\n rawSolution.append(x[j].solution_value())\n print()\n print('Problem solved in %f milliseconds' % solver.wall_time())\n print('Problem solved in %d iterations' % solver.iterations())\n print('Problem solved in %d branch-and-bound nodes' % solver.nodes())\n else:\n print('The problem does not have an optimal solution.')\n X = None\n X = rawSolution[:facility_count]\n rawSolution = rawSolution[facility_count:]\n Y = []\n for i in range(0, len(rawSolution), customer_count):\n Y.append(rawSolution[:customer_count])\n rawSolution = rawSolution[customer_count:]\n Y = np.array(Y)\n Y = Y.T\n solution = [-1]*len(customers)\n for i, y in enumerate(Y):\n for j in range(len(y)):\n if y[j] == 1:\n solution[i] = j + n\n \n used = [0] * facility_count\n for facility_index in solution:\n used[facility_index - n] = 1\n obj = sum([f.setup_cost*used[f.index-n] for f in facilities])\n for customer in customers:\n obj += length(customer.location, facilities[solution[customer.index] -n].location)\n\n return obj, solution\n\ndef solve_it(input_data):\n # Modify this code to run your optimization algorithm\n\n # parse the input\n lines = input_data.split('\\n')\n\n parts = lines[0].split()\n facility_count = int(parts[0])\n customer_count = int(parts[1])\n \n facilities = []\n for i in range(1, facility_count+1):\n parts = lines[i].split()\n facilities.append(Facility(i-1, float(parts[0]), int(parts[1]), Point(float(parts[2]), float(parts[3])) ))\n\n customers = []\n for i in range(facility_count+1, facility_count+1+customer_count):\n parts = lines[i].split()\n customers.append(Customer(i-1-facility_count, int(parts[0]), Point(float(parts[1]), float(parts[2]))))\n \n\n\n if facility_count * customer_count < 50000:\n obj, solution = solveWithOrtools(facilities, customers, facility_count, customer_count)\n elif facility_count * customer_count == 100000:\n obj, solution = 
solveWithOrtools(facilities[48:69], customers, facility_count, customer_count)\n else:\n min_ = 100000000000\n bst = None\n #for alpha in np.arange(0.7, 0.9, 0.05):\n if facility_count == 1000 and customer_count == 1500:\n obj, solution = nearestCheapFacility(facilities, customers, 0.85, 0, 1, 1)\n elif (facility_count ==500 and customer_count == 3000) or facility_count * customer_count == 160000:\n obj, solution = nearestCheapFacility(facilities, customers, 0.85)\n elif facility_count * customer_count >= 4000000:\n obj, solution = nearestCheapFacility(facilities, customers, 0.85)\n \n output_data = '%.2f' % obj + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, solution))\n\n return output_data\n\nimport sys\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n file_location = sys.argv[1].strip()\n with open(file_location, 'r') as input_data_file:\n input_data = input_data_file.read()\n print(solve_it(input_data))\n else:\n print('This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/fl_16_2)')\n\n" }, { "alpha_fraction": 0.6575342416763306, "alphanum_fraction": 0.732876718044281, "avg_line_length": 25.636363983154297, "blob_id": "10e7f03fc744bd1e06aec2fde430639f5582bc81", "content_id": "f06ca46ee0cf68595408d5abdfd00635ba1efc25", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "no_license", "max_line_length": 67, "num_lines": 11, "path": "/ReadMe.md", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "# Coursera \"Discrete Optimization\"\n## Results:\nKnapsack - 60/60, method: DP + density heuristic\n\nGraph Coloring - 47/60, method: CP\n\nTraveling Salesman - 42/60, method: Local Search(2opt/3opt) with SA\n\nFacility Location - 57/80, method: MIP + Local Search\n\nVehicle Routing - 42/60, method: CP" }, { "alpha_fraction": 0.580958366394043, "alphanum_fraction": 0.5978703498840332, "avg_line_length": 27.5, "blob_id": "aec5640f7e8ed81d27988d9587ecef3ddd0f38ff", "content_id": "c4dc3cac940447343fbb117558fa2012e5787e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3285, "license_type": "no_license", "max_line_length": 134, "num_lines": 112, "path": "/tsp/solver.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport math\nfrom collections import namedtuple\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport itertools\nPoint = namedtuple(\"Point\", ['x', 'y'])\n\nimport opt2\n\ndef length(point1, point2):\n \"\"\"\n Вычисление евклидового расстояния.\n \"\"\"\n return math.sqrt((point1.x - point2.x)**2 + (point1.y - point2.y)**2)\n\ndef create_table(nodeCount, points):\n \"\"\"\n Создание матрицы расстояний.\n \"\"\"\n a = [[0] * (nodeCount) for i in range(nodeCount)]\n a = np.array(a)\n for i in range(nodeCount):\n for j in range(nodeCount):\n a[i][j] = length(points[i],points[j])\n return a\n\ndef find_nearest_point(index, d_m, set_of_points):\n \"\"\"\n Поиск ближайшей точки.\n \"\"\"\n min_path = 200000\n index_of_min = -1\n for j in set_of_points:\n if d_m[index][j] < min_path and j!=index:\n min_path = d_m[index][j]\n index_of_min = j\n if(index_of_min == -1):\n return index, []\n else:\n set_of_points.remove(index_of_min)\n return index_of_min, set_of_points\n\ndef greedy(s, d_m, points):\n \"\"\"\n Жадный алгоритм для tsp.\n 
\"\"\"\n starting_point = s\n set_of_points = list(range(len(d_m[0])))\n set_of_points.remove(s)\n actual_point, set_of_points = find_nearest_point(starting_point, d_m, set_of_points)\n solution = []\n solution.append(starting_point)\n solution.append(actual_point)\n\n i = 2\n while i < len(d_m[0]):\n actual_point, set_of_points = find_nearest_point(actual_point, d_m, set_of_points)\n solution.append(actual_point)\n i += 1\n return solution\n\ndef solve_it(input_data):\n # Modify this code to run your optimization algorithm\n\n # parse the input\n lines = input_data.split('\\n')\n points = []\n nodeCount = int(lines[0])\n tabu_list = [(0,0), (0,0), (0,0)]\n \n for i in range(1, nodeCount+1):\n line = lines[i]\n parts = line.split()\n points.append(Point(float(parts[0]), float(parts[1])))\n\n solution = [i for i in range(nodeCount)]\n\n if nodeCount < 20000:\n s = random.randint(0, nodeCount-1)\n distance_matrix = create_table(nodeCount, points)\n solution = greedy(s, distance_matrix, points)\n else:\n solution = [i for i in range(nodeCount)]\n random.shuffle(solution)\n\n solution = opt2.two_opt(solution, points)\n\n obj = length(points[solution[-1]], points[solution[0]])\n for index in range(0, nodeCount-1):\n obj += length(points[solution[index]], points[solution[index+1]])\n\n # prepare the solution in the specified output format\n output_data = '%.2f' % obj + ' ' + str(0) + '\\n'\n output_data += ' '.join(map(str, solution))\n\n return output_data\n\n\nimport sys\n\nif __name__ == '__main__':\n import sys\n if len(sys.argv) > 1:\n file_location = sys.argv[1].strip()\n with open(file_location, 'r') as input_data_file:\n input_data = input_data_file.read()\n print(solve_it(input_data))\n else:\n print('This test requires an input file. Please select one from the data directory. (i.e. 
python solver.py ./data/tsp_51_1)')\n\n" }, { "alpha_fraction": 0.39918991923332214, "alphanum_fraction": 0.4252925217151642, "avg_line_length": 30.757143020629883, "blob_id": "2407d5f46fe297f060ca297556249ca8b9552af5", "content_id": "b24be9426c7d2859ae2ba9f69ccaaa7babfa61f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2346, "license_type": "no_license", "max_line_length": 141, "num_lines": 70, "path": "/tsp/opt3.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "import math\n\n#-------------------------------------------------------------------------------------------------------------------------------#\n# Описание:\n#\n# Модуль для 3-opt локального поиска с отжигом.\n#\n#-------------------------------------------------------------------------------------------------------------------------------#\n\ndef reverse_segment_if_3better(tour, i, j, k, t, points):\n \"\"\"Тоже самое\"\"\"\n #Дан путь - [...A-B...C-D...E-F...]\n A, B, C, D, E, F = points[tour[i-1]], points[tour[i]], points[tour[j-1]], points[tour[j]], points[tour[k-1]], points[tour[k % len(tour)]]\n d0 = length(A, B) + length(C, D) + length(E, F)\n d1 = length(A, C) + length(B, D) + length(E, F)\n d2 = length(A, B) + length(C, E) + length(D, F)\n d3 = length(A, D) + length(E, B) + length(C, F)\n d4 = length(F, B) + length(C, D) + length(E, A)\n\n deltas = [d1 - d0, d2 - d0, d3 - d0, d4 - d0]\n\n min_ = min(deltas)\n #random.random() - вернет вероятность от 0 до 1 \n if min_ >= 0: \n if random.random() > np.exp(-min_/max(t, 0.0000001)):\n #print(\"Не Отжигаю\")\n return 0\n else:\n #print(\"Отжигаю\")\n pass\n return 0\n if deltas[2] == min_:\n tmp = tour[j:k] + tour[i:j]\n tour[i:k] = tmp\n return deltas[2]\n elif deltas[0] == min_:\n tour[i:j] = reversed(tour[i:j])\n return deltas[0]\n elif deltas[1] == min_:\n tour[j:k] = reversed(tour[j:k])\n return deltas[1]\n elif deltas[3] == min_:\n tour[i:k] = reversed(tour[i:k])\n return deltas[3]\n\ndef three_opt(tour, points):\n \"\"\"Тоже самое шо и с два оптом\"\"\"\n big_delta = 0\n t = 100\n k = 0\n while True:\n delta = 0\n for (a, b, c) in all_3segments(len(tour)):\n delta += reverse_segment_if_better(tour, a, b, c, t, points)\n t = 0.9*t\n if delta >= 0:\n break \n k += 1\n if len(tour) >= 500:\n if k == 1: \n break\n return tour\n\n\ndef all_3segments(n: int):\n \"\"\"Все тройки\"\"\"\n return ((i, j, k)\n for i in range(n)\n for j in range(i + 2, n)\n for k in range(j + 2, n + (i > 0)))" }, { "alpha_fraction": 0.42328041791915894, "alphanum_fraction": 0.4642857015132904, "avg_line_length": 20.571428298950195, "blob_id": "14557c40756fbe492e6e5a1adddfa8d0f59d04dd", "content_id": "8529d89e0357cda03a6d418ea0207470f27d4637", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 70, "num_lines": 35, "path": "/knapsack/variants/bnb.py", "repo_name": "IvanLukianenko/Discrete_Optimization", "src_encoding": "UTF-8", "text": "\nglobal_max = 0\nglobal n\nn = 3\nw = [5, 8, 3]\nv = [45, 48, 35]\nK = 10\ntkn = []\ndef LS(o_v, f_s, actl_v, tkn):\n global global_max\n global tkn1\n if (f_s <= 0):\n return 0, []\n if (actl_v < global_max):\n return 0, []\n if (len(tkn) == n) and (actl_v< global_max):\n return 0, []\n else:\n if (len(tkn) == n):\n tkn1 = tkn\n return actl_v, tkn\n if (o_v < global_max):\n return 0, []\n\n a, b = LS(o_v, f_s - w[len(tkn)], actl_v + v[len(tkn)], tkn + 
[1])\n a1, b1 = LS(o_v - v[len(tkn)], f_s, actl_v, tkn + [0])\n\n if a > a1:\n global_max, tkn = a, b\n else:\n global_max, tkn = a1, b1\n return global_max, tkn\n\nvalue, taken = LS(128, K, 0, [])\nprint(value)\nprint(taken)\n" } ]
10
cmilton001/django_onlineShopping_website_development
https://github.com/cmilton001/django_onlineShopping_website_development
0c09e35bc2162f6d9d971f5a6a74ef1301c5ca88
97bd605b0d3876de0d549b69035f10ecf7519750
d7870e28d6987a555d1f0f88ff5c7278e895d2ad
refs/heads/master
2023-05-13T09:40:12.167340
2021-06-01T08:08:16
2021-06-01T08:08:16
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5702270865440369, "alphanum_fraction": 0.5996635556221008, "avg_line_length": 32.97142791748047, "blob_id": "23916f2ec202d5ff4bdc2c0008d103b5e1ee52a2", "content_id": "b004f86379936f55b81eab61d45047b4000f830a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1189, "license_type": "no_license", "max_line_length": 131, "num_lines": 35, "path": "/orders/migrations/0002_auto_20210601_0315.py", "repo_name": "cmilton001/django_onlineShopping_website_development", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2.3 on 2021-06-01 07:15\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0002_auto_20210601_0157'),\n ('orders', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='order',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='orderitem',\n name='id',\n field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),\n ),\n migrations.AlterField(\n model_name='orderitem',\n name='order',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='items', to='orders.order'),\n ),\n migrations.AlterField(\n model_name='orderitem',\n name='product',\n field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='order_items', to='shop.product'),\n ),\n ]\n" }, { "alpha_fraction": 0.6876574158668518, "alphanum_fraction": 0.6876574158668518, "avg_line_length": 30.760000228881836, "blob_id": "b71ba8ba0038cb141b805797d48e76238cc95f7a", "content_id": "43b1ff94ff7b8429e354687c20cb8bf4c40a9591", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 794, "license_type": "no_license", "max_line_length": 66, "num_lines": 25, "path": "/myshop/urls.py", "repo_name": "cmilton001/django_onlineShopping_website_development", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nimport cart.views\nimport orders.views\nimport payment.views\nimport shop.views\nimport coupons.views\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^cart/', cart.views.cart_add, name='cart'),\n url(r'^orders/', orders.views.order_create, name='orders'),\n url(r'^payment/', payment.views.payment_done, name='payment'),\n url(r'^', shop.views.product_list, name='shop'),\n url(r'^coupons/', coupons.views.coupon_apply, name='coupons'),\n #url(r'^paypal/', 'paypal.standard.ipn.urls'),\n\n]\n\nif settings.DEBUG:\n urlpatterns += static( settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT )\n" } ]
2
tonyferrell/aoc2020
https://github.com/tonyferrell/aoc2020
3823bdf058cccf49593f436935df927e117120f0
b8ef3bcaeface1b1d2f05669a3758a6c9dbc1343
32552bf16a09a3f65cc5f7c712bde6cfb887e312
refs/heads/master
2023-01-31T09:03:28.973277
2020-12-17T07:01:49
2020-12-17T07:01:49
322,070,443
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5467380881309509, "alphanum_fraction": 0.554527759552002, "avg_line_length": 26.039474487304688, "blob_id": "78b085b6098afbfa24dc27464aa5286b6a1663bd", "content_id": "14a77d4b86ac5fd93352df9fd5010b00ac29ac39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2054, "license_type": "no_license", "max_line_length": 70, "num_lines": 76, "path": "/day14/day14sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "import re\n\ncurr_mask = \"\"\nmem = {}\n \ndef expand_mask(mask, val, accum, gen_masks):\n assert len(val) == len(mask), \"Mask and Value must be the same\"\n if mask == \"\" and val == \"\":\n gen_masks.append(accum)\n # print(\"Found Mask! {:b}\".format(accum))\n return\n \n next_m, remain_m = mask[0], mask[1:]\n next_v, remain_v = val[0], val[1:]\n\n accum = accum << 1\n if next_m == '1':\n accum = accum | 1\n expand_mask(remain_m, remain_v, accum, gen_masks)\n return\n elif next_m == '0':\n b = 1 if next_v == '1' else 0\n expand_mask(remain_m, remain_v, accum | b, gen_masks)\n return\n elif next_m == 'X':\n expand_mask(remain_m, remain_v, accum, gen_masks)\n expand_mask(remain_m, remain_v, accum | 1, gen_masks)\n return\n else:\n raise Exception(\"Unknown Mask Char:\", next_m)\n\ndef exec_mask(line: str, verbose = False):\n global curr_mask\n\n _, mask = line.split(\" = \")\n curr_mask = mask.strip()\n if verbose:\n print(\"Curr Mask: {}\".format(curr_mask))\n\ndef apply_mask(curr_mask: str, loc_v:int):\n loc_bin = \"{:b}\".format(loc_v)\n needed_length = max(len(curr_mask), len(loc_bin))\n curr_mask = curr_mask.rjust(needed_length, '0')\n loc_bin = loc_bin.rjust(needed_length, '0')\n\n masks = []\n\n expand_mask(curr_mask, loc_bin, 0, masks)\n return masks\n\nmem_loc = re.compile(\"mem\\[(\\d+)\\]\")\ndef exec_mem(line: str, verbose = False):\n loc, val = line.split(\" = \")\n val_i = int(val)\n\n loc_str = mem_loc.match(loc)\n loc_i = int(loc_str[1])\n\n\n mem_locs = apply_mask(curr_mask, loc_i)\n for loc in mem_locs:\n if verbose:\n print(\"mem[{}] = {}\".format(loc, val_i))\n mem[loc] = val_i\n\n return\n\nwith open('input.txt') as data:\n for line in data:\n verbose = False\n if line.startswith('mem'):\n exec_mem(line, verbose)\n elif line.startswith('mask'):\n exec_mask(line, verbose)\n\nprint(\"Memory Total: {}\".format(sum([val for _, val in mem.items()])))" }, { "alpha_fraction": 0.42153283953666687, "alphanum_fraction": 0.4324817657470703, "avg_line_length": 27.653594970703125, "blob_id": "470a6b9883f2d70ec5315e9bcdcdc92413d403c1", "content_id": "873872d754c495516243fc6b19ba40f445c873e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4384, "license_type": "no_license", "max_line_length": 106, "num_lines": 153, "path": "/day11/day11sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from typing import List\nfrom functools import reduce\n\n\ndef print_board(g_board):\n for line in g_board:\n print(\"\".join(line))\n\n\nclass LifeBoard:\n search_dirs = [\n [0, -1],\n [0, 1],\n [-1, 0],\n [1, 0],\n\n [-1, -1],\n [1, 1],\n [1, -1],\n [-1, 1]\n ]\n\n def __init__(self, board: List[List[str]], hash: List[int]):\n self.board = board\n self.row_count = len(board)\n self.col_count = len(board[0])\n self.hash_list = hash\n\n @classmethod\n def build_input_board(cls, file: str) -> 'LifeBoard':\n with open(file) as data:\n board = []\n hash_list = []\n col_count = None\n for line in data:\n line 
= line.strip()\n row = list(line)\n n_col = len(row)\n if col_count is None:\n col_count = n_col\n elif col_count != n_col:\n raise Exception(\n \"Non-matching column count: {}!={}\".format(col_count, n_col))\n\n board.append(row)\n\n row_hash = 0\n for i in row:\n bit = 1 if i == '#\"' else 0\n row_hash = (row_hash << 1) | bit\n\n hash_list.append(row_hash)\n\n return LifeBoard(board, hash_list)\n\n def __str__(self):\n str = \"\"\n for i in range(0, self.row_count):\n str += \"{:05d} : {}\\n\".format(\n self.hash_list[i], \"\".join(self.board[i]))\n\n return str\n\n def __eq__(self, o: object) -> bool:\n if isinstance(o, LifeBoard):\n if len(self.hash_list) != len(o.hash_list):\n return False\n\n return reduce(lambda cum, elem: cum and elem[0] == elem[1], zip(self.hash_list, o.hash_list))\n\n return False\n\n def get_adj_count(self, row: int, col: int, verbose=False) -> int:\n adj = 0\n for d_r, d_c in self.search_dirs:\n n_r = row\n n_c = col\n if verbose:\n print(\"Looking for adjacents for ({}, {}) with delta ({}, {})\".format(row, col, d_r, d_c))\n while True:\n n_r = n_r + d_r\n n_c = n_c + d_c\n\n if n_r < 0 or n_r >= self.row_count or n_c < 0 or n_c >= self.col_count:\n # No chairs this direction - we reached the end of the board\n break\n\n next = self.board[n_r][n_c] \n if verbose:\n print(\"Checking ({}, {}) = {}\".format(n_r, n_c, next))\n if next == '.':\n if verbose:\n print(\"Floor - skipping\")\n # This isn't a chair - keep looking\n continue\n\n if next == '#':\n if verbose:\n print(\"Occupied Chair\")\n adj += 1\n\n break\n\n if verbose:\n print(\"### Total Adjacents: ({}, {}) = {}\".format(row,col, adj))\n return adj\n\n def next_seating(self):\n adj_board = []\n hashes = []\n for r in range(0, self.row_count):\n new_row = []\n row_hash = 0\n for c in range(0, self.col_count):\n curr = self.board[r][c]\n new_seat = curr\n \n verbose = False\n adj = self.get_adj_count(r,c, verbose)\n if curr == 'L' and adj == 0:\n new_seat = '#'\n elif curr == '#' and adj >= 5:\n new_seat = 'L'\n\n row_hash = (row_hash << 1) | (1 if new_seat == '#' else 0)\n new_row.append(new_seat)\n\n adj_board.append(new_row)\n hashes.append(row_hash)\n\n return LifeBoard(adj_board, hashes)\n \n def full_seats(self) -> int:\n return sum([sum([1 if digit=='1' else 0 for digit in bin(n)[2:]]) for n in self.hash_list])\n\n\n\ndef part1():\n prev = LifeBoard.build_input_board('input.txt')\n\n for i in range(1000):\n print(\"Board {}\".format(i))\n # print(prev)\n next = prev.next_seating()\n if next == prev:\n print(\"Found Stable Board on iter {} with {} seats\".format(i, next.full_seats()))\n print(next)\n break\n\n prev = next\n\n\npart1()\n" }, { "alpha_fraction": 0.4842868745326996, "alphanum_fraction": 0.5068492889404297, "avg_line_length": 21.581817626953125, "blob_id": "b194d90600a2ce780467530ecd031ca708949021", "content_id": "a2c1cd6281a725c8e9e06ac759fb9a168cb1593b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1241, "license_type": "no_license", "max_line_length": 85, "num_lines": 55, "path": "/day12/day12sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from os import curdir\nfrom typing import List\nfrom functools import reduce\nimport numpy as np\n\ninst = []\nwith open('input.txt') as data:\n inst = [(x[0], int(x[1:-1])) for x in data.readlines()]\n\nxy = [0, 0]\nwp = [10, 1]\ncard_dirs = ['N', 'E', 'S', 'W']\ndirs = [\n [0, 1], # 'N'\n [1, 0], # 'E'\n [0, -1],# 'S'\n [-1, 0],# 'W'\n]\n\nlookup = 
dict(zip(card_dirs, dirs))\n\ndef scale(dir, amt):\n return [x * amt for x in dir]\n\ndef comb(x, y):\n return [a + b for a, b in zip(x, y)]\n\ndef get_dir(curr_dir: List[int], cmd: str, i: int):\n mult = (-1 if cmd == 'L' else 1)\n steps = mult * int(i / 90)\n new = np.rot90([[curr_dir[0], curr_dir[1]], [-curr_dir[1], -curr_dir[0]]], steps)\n print(\"New:\", new[0])\n curr_dir = new[0]\n return curr_dir\n\n\ndef fmt(s):\n i = dirs.index(s)\n return card_dirs[i]\n\nfor cmd, amt in inst:\n if cmd in ['R', 'L']:\n wp = get_dir(wp, cmd, amt)\n elif cmd == 'F':\n mv = scale(wp, amt)\n xy = comb(xy, mv)\n else:\n # Just a raw wp move\n mv = scale(lookup[cmd], amt)\n wp = comb(wp, mv)\n\n # Also move\n print(\"After {}{} move *** WP: {}, Ship: {}\".format(cmd, amt, wp, xy))\n\nprint(reduce(lambda x,y: abs(x) + abs(y), xy))" }, { "alpha_fraction": 0.46259674429893494, "alphanum_fraction": 0.47721409797668457, "avg_line_length": 23.744680404663086, "blob_id": "36093845d3b50a86ebe59e4d2bf456ac330cdaf2", "content_id": "41e341b80bbf8bfacde3354877fdf35f80f6366e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 72, "num_lines": 47, "path": "/day5/day5sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "import math\ndef bin_part_f(start: int, end_index: int, forward: str, backward: str):\n def bin_part_impl(bin: str):\n s = start\n e = end_index\n # print(\"Doing {}\".format(bin))\n while(len(bin)):\n # print(\"Range: ({}, {})\".format(s, e))\n first, bin = bin[0], bin[1:]\n if first == forward:\n e = math.floor((s + e) / 2)\n elif first == backward:\n s = math.ceil((s + e) / 2)\n \n if s == e:\n return s\n else:\n raise Exception(\"Mistakes were made {}, {}\".format(s, e))\n\n return bin_part_impl\n\nrow_partition = bin_part_f(0, 127, 'F', 'B')\ncol_partition = bin_part_f(0, 7, 'L', 'R')\n\n\nex = 'BBFFBBFRLL'\ndef get_id(i: str) -> int:\n row = row_partition(i[:7])\n col = col_partition(i[7:])\n\n return row * 8 + col\n\nall_ids = []\nwith open('input.txt') as data:\n for row in data:\n new_id = get_id(row)\n all_ids.append(new_id)\n\nall_ids.sort()\n\nlast = -1\nfor i in all_ids:\n if last != -1 and last != (i-1):\n print(\"({}, -{}-, {})\".format(last, (i-1), i))\n last = i\n else:\n last = i\n" }, { "alpha_fraction": 0.43622449040412903, "alphanum_fraction": 0.49744898080825806, "avg_line_length": 22.058822631835938, "blob_id": "b42e783bdbafbe7e80caa69ab26599fca99493e4", "content_id": "01ff793dad9604bf4bc8a88b92f8893d67479975", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 392, "license_type": "no_license", "max_line_length": 75, "num_lines": 17, "path": "/day15/day15sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "input = [12,1,16,3,11,0]\n#input = [0,3,6]\n\nseen = {}\nsaid = input[0]\nfor i in range(0,30000000):\n last_said = said\n if i < len(input):\n said = input[i]\n elif said in seen:\n # print(\"Seen on {}. 
Saying {}\".format(seen[said], i - seen[said]))\n said = i - seen[said]\n else:\n said = 0\n \n seen[last_said] = i\n print(\"{}: {}\".format(i+1, said))\n" }, { "alpha_fraction": 0.4268185794353485, "alphanum_fraction": 0.43908852338790894, "avg_line_length": 29.052631378173828, "blob_id": "f36ad0c7d806c86dd500d2b936a6b20831b7a2cb", "content_id": "28b56968e076ae963c56ca0fbaed51c075150330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1141, "license_type": "no_license", "max_line_length": 75, "num_lines": 38, "path": "/day2/day2sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "def part1():\n with open('input.txt') as data:\n valid = 0\n for line in data:\n char_range, char, password = line.split(' ')\n min, max = char_range.split('-')\n min, max = int(min), int(max)\n char = char[0]\n\n count = 0\n for c in password:\n if c == char:\n count += 1\n \n if min <= count and count <= max:\n print(password, \"is valid\")\n valid += 1\n print(\"There are {} valid passwords\".format(valid))\n\n\ndef part2():\n with open('input.txt') as data:\n valid = 0\n for line in data:\n char_range, char, password = line.split(' ')\n min, max = char_range.split('-')\n min, max = int(min), int(max)\n char = char[0]\n\n count = 0\n first, second = password[min-1] == char, password[max-1] ==char\n \n if first ^ second:\n print(password, \"is valid\")\n valid += 1\n print(\"There are {} valid passwords\".format(valid))\n\npart2()" }, { "alpha_fraction": 0.42771685123443604, "alphanum_fraction": 0.47756728529930115, "avg_line_length": 19.079999923706055, "blob_id": "684bb643db0f12e652bf057ef6827118cbc4a3f4", "content_id": "b65c081bc54a7fb68030089222808c357c8df4ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1003, "license_type": "no_license", "max_line_length": 64, "num_lines": 50, "path": "/day13/day13sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "with open('input.txt') as data:\n departure = int(next(data))\n r = next(data)\n times = [int(x) if x != 'x' else 0 for x in r.split(\",\")]\n\n\nnum = d_n = times[0]\n# num = 100000000000000\n# num = 102563359500841\n# d_n = max(times)\nadj = times.index(d_n)\n\nwhile num == 0 or num % d_n != 0:\n num += 1\n\nprint(\"Starting at {}, delta of {}\".format(num, d_n))\n\ntimes = list(filter(lambda x: x[1] > 0, list(enumerate(times))))\nprint(times)\n\nc = 0\nwhile True:\n c += 1\n if c % 500000 == 0:\n print(\"Checking\", num)\n success = True\n\n match = 1\n for i, v in times:\n if v == 0:\n continue\n\n c = (num - adj + i)\n\n\n # print(\"Trying {}: ({}) {} != 0\".format(num, v, c % v))\n if c % v != 0:\n success = False\n break\n \n match *= v\n if match > d_n:\n d_n = match\n print(\"New delta\", d_n)\n\n if success:\n print(\"{} was the number!\".format(num - adj))\n break\n \n num += d_n" }, { "alpha_fraction": 0.5218303799629211, "alphanum_fraction": 0.5293870568275452, "avg_line_length": 24.612903594970703, "blob_id": "a88227a11e8545fe0e831a20aef30ba2ece2b2cf", "content_id": "7409d6cbea71ddc9b1e6726d8cd415efc4549bb9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2382, "license_type": "no_license", "max_line_length": 92, "num_lines": 93, "path": "/day8/day8sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "import sys\n\ncontent = []\nwith open('input.txt') as f:\n content = f.readlines()\n\ndef 
get_instr(idx: int):\n if idx >= len(content):\n raise Exception(\"Out of bounds {}\".format(idx))\n\n try:\n line = content[idx].split(\" \")\n except:\n print(\"###***Failing at {}***###\".format(idx))\n raise\n return line[0], int(line[1])\n\n# Execute the emulator\n\ndef exec_inst(curr_inst, acc, swp_idx):\n\n inst, input = get_instr(curr_inst)\n # print(\"Executing instruction {} {}, looking for swap {}\".format(inst, input, swp_idx))\n\n if curr_inst == swp_idx:\n if inst == 'nop':\n inst = 'jmp'\n elif inst == 'jmp':\n inst = 'nop'\n\n if inst == \"nop\":\n return curr_inst + 1, acc\n elif inst == \"acc\":\n acc += input\n return curr_inst + 1, acc\n elif inst == \"jmp\":\n return curr_inst + input, acc\n else:\n raise Exception(\"Unknown instruction at\", curr_inst)\n\n\ndef find_swp(instruction_set, search_start):\n for i in range(search_start, -1, -1):\n cmd, input = get_instr(i)\n if cmd == \"nop\" or cmd == 'jmp':\n print(\"Found a potential swap {} - {} at {}\".format(cmd, input, i))\n try:\n acc = run_swp_nop_jmp(i)\n print(\"Finished execution. Accumlator value:\", acc)\n sys.exit()\n\n except Exception as ex:\n print(ex)\n continue\n\ndef run_swp_nop_jmp(swp_idx: int):\n print(\"Swapping idx {}\".format(swp_idx))\n acc = 0\n curr_inst = 0\n seen = [0 for _ in range(len(content))]\n\n while(True):\n if curr_inst >= len(content):\n print(\"Execution terminated. Acc: {}\".format(acc))\n return acc\n\n # Cycle detection\n if seen[curr_inst] != 0:\n raise Exception(\n \"Swap at {} not-accepted. Cycle at {}\".format(swp_idx, curr_inst))\n\n seen[curr_inst] = 1\n curr_inst, acc = exec_inst(curr_inst, acc, swp_idx)\n\n\ndef start():\n curr_inst = 0\n acc = 0\n seen = [0 for _ in range(len(content))]\n instructions = []\n\n # Detect the first cycle\n while(True):\n if seen[curr_inst] != 0:\n find_swp(instructions, len(instructions) - 1)\n\n instructions.append(curr_inst)\n\n seen[curr_inst] = 1\n\n curr_inst, acc = exec_inst(curr_inst, acc, -1)\n\nstart()\n" }, { "alpha_fraction": 0.4053475856781006, "alphanum_fraction": 0.427807480096817, "avg_line_length": 24.602739334106445, "blob_id": "c8e06c4a807cff955894336fc7a088be2ae9daf8", "content_id": "cd4d405437c9df8537037e8cf884e14de3f9474d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1870, "license_type": "no_license", "max_line_length": 118, "num_lines": 73, "path": "/day1/day1sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from functools import reduce\n\ndef part1():\n with open('input.txt') as data:\n seen = {}\n for line in data:\n num = int(line)\n\n needed = 2020 - num\n if needed in seen:\n print(\"Answer:\", needed * num)\n break\n else:\n seen[num] = 1\n\ndef part2():\n input = []\n seen = {}\n with open('input.txt') as data:\n for x in data:\n val = int(x)\n seen[val] = [val]\n input.append(val)\n \n input.sort()\n\n size = len(input)\n\n # left = 0\n # right = size - 1\n # while True:\n # low_val = input[left]\n # high_val = input[right]\n\n # if low_val + high_val > 2020 and left < right:\n # right -= 1\n # continue\n \n # while low_val + high_val < 2020 and left < right:\n # needed = 2020 - (low_val + high_val)\n # if needed in seen:\n # print(\"You did it ({}, {}, {}) = {}\".format(low_val, needed, high_val, low_val * needed * high_val))\n # exit()\n # else:\n # left += 1\n # low_val = input[left]\n\n\n pairs = {}\n for i in range(0, size):\n for j in range(i+1, size):\n val1 = input[i]\n val2 = input[j]\n\n sum = val1 + val2\n 
if sum < 2020:\n if sum in pairs:\n print(\"What happened?!\")\n continue\n\n pairs[val1+val2] = [val1, val2]\n else:\n # No more values in sorted list\n break\n\n for sum, items in pairs.items():\n needed = 2020 - sum\n if needed in seen:\n items.append(needed)\n calc = reduce(lambda x,y: x * y, items)\n print(\"You found it! ({}) = {}\".format(items, calc))\n\npart2()\n\n" }, { "alpha_fraction": 0.4500587582588196, "alphanum_fraction": 0.4647473692893982, "avg_line_length": 27.855932235717773, "blob_id": "9be30c1a67530db2f15d7840ee91ea6869f1b674", "content_id": "ccca8f9ceb726e58c47847bf003c14ca392c3074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3404, "license_type": "no_license", "max_line_length": 84, "num_lines": 118, "path": "/day4/day4sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "import re\n\ndef val_range(min: int, max: int):\n def g(x: str):\n if len(x) != 4:\n return False\n\n i_x = int(x)\n return min <= i_x and i_x <= max\n \n return g\n\nin_match = re.compile(\"(\\d+)in\")\ncm_match = re.compile(\"(\\d+)cm\")\ndef val_height(hgt: str) -> bool:\n i = in_match.match(hgt)\n if i:\n try:\n i_i = int(i.group(1))\n return 59 <= i_i and i_i <= 76 \n except:\n print(\"Bad Regex Data\")\n return False\n \n c = cm_match.match(hgt)\n if c:\n try:\n i_c = int(c.group(1))\n return 150 <= i_c and i_c <= 193 \n except:\n print(\"Bad Regex Data\")\n return False\n \n return False\n \n\nhcl_valid = re.compile(\"^#[a-fA-F0-9]{6}$\")\necl = ['amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth']\npassport = re.compile(\"^[0-9]{9}$\")\nclass Passport:\n req_fields = {\n 'byr': val_range(1920, 2002), # (Birth Year)\n 'iyr': val_range(2010, 2020), # (Issue Year)\n 'eyr': val_range(2020, 2030), # (Expiration Year)\n 'hgt': val_height, # (Height)\n 'hcl': lambda x: hcl_valid.match(x) is not None, # (Hair Color)\n 'ecl': lambda x: x in ecl, # (Eye Color)\n 'pid': lambda x: passport.match(x) is not None, # (Passport ID)\n #'cid', # (Country ID)\n }\n\n def __init__(self, start_line):\n self._start_line = start_line\n self._lines = []\n self._fields = {}\n \n def add_line(self, line: str):\n self._lines.append(line)\n try:\n for kvp in line.split(\" \"):\n if kvp:\n self.add_field(kvp)\n except:\n print(\"Failed while processing the line: '{}'\".format(line))\n raise\n \n def add_field(self, kvp):\n try:\n field, value = kvp.split(':')\n self._fields[field] = value\n except:\n print(\"Failed to extract from '{}'\".format(kvp) )\n raise\n \n def validate(self) -> bool:\n for field, pred in self.req_fields.items():\n if not field in self._fields:\n print(\"Missing field\", field)\n return False\n elif not pred(self._fields[field]):\n return False\n\n return True\n \n def __str__(self) -> str:\n end_line = self._start_line + len(self._lines) - 1\n return \"{}-{}: {}\".format(self._start_line, end_line, \",\".join(self._lines))\n\ndef part():\n total = 0\n count = 0\n with open('input.txt') as data:\n line_num = 0\n next_passport: Passport = None\n for line in data:\n line_num += 1\n if next_passport is None:\n next_passport = Passport(line_num)\n line = line.strip()\n try:\n if line:\n next_passport.add_line(line)\n else:\n total += 1\n if next_passport.validate():\n count += 1\n print(\"Valid Passport:\", next_passport)\n else:\n print(\"Invalid Passport:\", next_passport)\n\n next_passport = None\n except:\n print(\"Failed on line {}: {}\".format(line_num, line))\n raise\n \n print(\"Processed: {}, Valid: 
{}\".format(total, count))\n\npart()" }, { "alpha_fraction": 0.4803541898727417, "alphanum_fraction": 0.483397901058197, "avg_line_length": 23.41891860961914, "blob_id": "7eb4cc23498d8fb63f90274bb85d0823803b57d0", "content_id": "a6f4c008d8c81182d8c4fb0a71fc8a64dac54b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3614, "license_type": "no_license", "max_line_length": 67, "num_lines": 148, "path": "/day16/day16sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from functools import reduce\nrules = {}\n\ndef in_range(start, end):\n def pred(x):\n return start <= x and x <= end\n \n return pred\n\ndef in_range_arr(range_arr):\n assert len(range_arr) == 2, \"Range Array must be length 2\"\n return in_range(int(range_arr[0]), int(range_arr[1]))\n\nwith open('input.txt') as data:\n line = next(data).strip()\n print(\"Rules\")\n while line:\n print(\"Rule:\", line)\n name, rule = line.split(': ')\n ranges = [x.split('-') for x in rule.split(' or ')]\n rules[name] = list(map(in_range_arr, ranges))\n\n line = next(data).strip()\n \n # Throw away \"your ticket\"\n print(\"Throwing awway:\", next(data))\n your_ticket = next(data).strip().split(',')\n print(\"Your ticket:\", your_ticket)\n\n print(\"Throwing awway:\", next(data))\n print(\"Throwing awway:\",next(data))\n\n line = next(data).strip()\n print(\"Spied:\")\n\n good = []\n while line:\n # print(\"S:\", line)\n # Parse the line\n vals = list(map(int, line.split(',')))\n # Make sure we can satisfy all rules\n row_valid = True\n for val in vals:\n # Make sure there's a rule this value satisfies.\n valid_value = False\n for r_n, r in rules.items():\n # print(\"Applying Rules\", r_n)\n for sub_r in r:\n if sub_r(val):\n # print(\"{} satisfies {}\".format(val, r_n))\n valid_value = True\n break\n \n # Don't look for more rules\n if valid_value:\n # print(\"{} is valid\".format(val))\n break\n \n if not valid_value:\n # print(\"The invalid we need right now\", val)\n row_valid = False\n \n if row_valid:\n print(\"Row {} is valid\".format(vals))\n good.append(vals)\n else:\n print(\"Row {} is INVALID\".format(vals))\n\n try:\n line = next(data).strip()\n except StopIteration:\n # EOF\n break\n\ndef any_subrule(val, list_rules):\n for f in list_rules:\n if f(val):\n return True\n\n return False\n\npossible = []\n\nct = len(good[0])\nrows = good\nsats = {}\nposs = {}\nfor rn, rule_list in rules.items():\n for i in range(ct):\n # Check if each row satisfies this rule\n valid_for_this_i = True\n for r in rows:\n val_to_check = r[i]\n valid_for_this_i = any_subrule(val_to_check, rule_list)\n\n if not valid_for_this_i:\n break\n\n if valid_for_this_i:\n if not i in sats:\n sats[i] = []\n if not rn in poss:\n poss[rn] = []\n\n sats[i].append(rn)\n poss[rn].append(i)\nprint()\nprint(\"one\")\nprint(sats)\nprint()\nprint(\"two\")\nprint(poss)\n\nsolution = [\"\"] * ct\nsolved = 0\nfound = []\nfound_i = []\nwhile solved < ct:\n new_sats = {}\n for k, v in sats.items():\n if k in found_i:\n continue\n\n for f in found:\n if f in v:\n v.remove(f)\n\n new_sats[k] = v\n \n sats = new_sats\n found = []\n found_i = []\n \n for i, poss in sats.items():\n if len(poss) == 1:\n print(\"Solved: {} == {}\".format(i, poss))\n solution[i] = poss[0]\n solved += 1\n\n found.append(poss[0])\n\nprint(solution)\nmult = 1\nfor i, s in enumerate(solution):\n if s.startswith(\"departure\"):\n mult *= int(your_ticket[i])\n\nprint(mult)\n" }, { "alpha_fraction": 0.4272935688495636, 
"alphanum_fraction": 0.4334862530231476, "avg_line_length": 24.934524536132812, "blob_id": "dbea937a0e30119becfcc67ce156b86f41ecbdbf", "content_id": "af82bc9d868bc52511ebb0676484eeac8270683c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4360, "license_type": "no_license", "max_line_length": 81, "num_lines": 168, "path": "/day17/day17sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from typing import Deque\n\n\nclass Cube:\n def __init__(self, x, y, z, w):\n self.x = x\n self.y = y\n self.z = z\n self.w = w\n\nclass Board:\n def __init__(self):\n self.max_x = None\n self.min_x = None\n\n self.max_y = None\n self.min_y = None\n\n self.max_z = None\n self.min_z = None\n\n self.max_w = None\n self.min_w = None\n\n self.state = {}\n \n def activate_cube(self, x, y, z, w):\n \"\"\"\n Activate a cube!\n \"\"\"\n self.maybe_update_bounds(x, y, z, w)\n c = Cube(x, y, z, w)\n self.state[(x,y,z,w)] = c\n\n def maybe_update_bounds(self, x, y, z, w):\n if self.max_x is None:\n self.max_x = x\n elif self.max_x < x:\n self.max_x = x\n\n if self.min_x is None:\n self.min_x = x\n elif self.min_x > x:\n self.min_x = x\n \n if self.max_y is None:\n self.max_y = y\n elif y > self.max_y:\n self.max_y = y\n \n if self.min_y is None:\n self.min_y = y\n elif y < self.min_y:\n self.min_y = y\n \n if self.max_z is None:\n self.max_z = z\n elif self.max_z < z:\n self.max_z = z\n\n if self.min_w is None:\n self.min_w = w\n elif z < self.min_w:\n self.min_w = w\n\n\n \n def check_cube(self, x, y, z, w):\n key = (x,y,z,w)\n if not key in self.state:\n return\n \n def get_active_neighbor_count(self,x,y,z, w, cap = -1):\n neighbors = self.get_neighbors(x, y, z, w)\n\n count = 0\n for n in neighbors:\n if n in self.state:\n count += 1 \n \n if cap > 0 and count > cap:\n return count\n \n return count\n\n def get_neighbors(x, y, z, w):\n for d_x in [-1, 0, 1]:\n for d_y in [-1, 0, 1]:\n for d_z in [-1, 0, 1]:\n for d_w in [-1, 0, 1]:\n if d_x == d_y and d_y == d_z and d_z == d_w and d_z == 0:\n continue\n\n yield (x + d_x, y + d_y, z + d_z, w + d_w)\n \n def is_active(self, x, y, z, w):\n return (x,y,z,w) in self.state\n\n def print_layer(self, z_filt):\n for x,y,z in self.state:\n if z == z_filt:\n print(\"({}, {})\".format(x,y))\n\n def iter_board(self) -> 'Board':\n \"\"\"\n Look at ever cube's current state, track its neighbors, be sure not to \n double check cubes\n \"\"\"\n verbose = False\n new_board = Board()\n visited_cubes = set()\n dq = Deque()\n for loc, _ in self.state.items():\n x, y, z, w = loc\n\n visited_cubes.add(loc)\n active_neighbor_count = 0\n\n for n in Board.get_neighbors(x,y,z,w):\n dq.append(n)\n n_x, n_y, n_z, n_w = n\n\n if self.is_active(n_x, n_y, n_z, n_w):\n active_neighbor_count += 1\n\n if active_neighbor_count == 2 or active_neighbor_count == 3:\n new_board.activate_cube(x, y, z, w)\n if(verbose):\n print(\"Activating neighbor! 
{}\".format(loc))\n \n for loc in dq:\n if loc in visited_cubes:\n continue\n\n x, y, z, w = loc\n visited_cubes.add(loc)\n\n count = 0\n for n in Board.get_neighbors(x,y,z, w):\n if count > 3:\n break\n\n n_x, n_y, n_z, n_w = n\n if self.is_active(n_x, n_y, n_z, n_w):\n count += 1\n \n if count == 3:\n new_board.activate_cube(x, y, z, w)\n \n return new_board\n\n def get_active_cube_count(self):\n return len(self.state)\n \nboard = Board()\n# Build the board\nwith open('input.txt') as data:\n z=w=0\n for y, line in enumerate(data):\n for x, rep in enumerate(line):\n if rep == '#':\n board.activate_cube(x,y,z,w)\n\nprint(\"Starting: \", board.get_active_cube_count())\n# print(board.get_layer)\nfor i in range(6):\n board = board.iter_board()\n print(\"Iteration {} has {} active.\".format(i, board.get_active_cube_count()))\n\n\n\n" }, { "alpha_fraction": 0.5050062537193298, "alphanum_fraction": 0.5156445503234863, "avg_line_length": 24.79032325744629, "blob_id": "d48681e85a089da5235e11ee4b3e86ad01f0df21", "content_id": "85caed89c307c45bc2dc99c2e3fd4c8eabb168a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1598, "license_type": "no_license", "max_line_length": 91, "num_lines": 62, "path": "/day9/day9sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from typing import List\n\n# Read the data\nwindow = 5\ndata = []\nwith open('input.txt') as file:\n for i in file:\n data.append(int(i))\n\ndef find_sum(data: List[int], needle: int, start:int, end: int):\n seen = {}\n # print(\"Searching range:\", start, end)\n for i in range(start, end):\n val = data[i]\n needed = needle - val\n # print(\"Checking for {}={}-{}\".format(needed, needle, val))\n if needed in seen:\n return\n else:\n seen[val] = 1\n\n raise Exception(\"Missing value\", needle)\ndef get_max_min_sum(data, start, end):\n max = min = data[start]\n for i in range(start, end):\n if data[i] < min:\n min = data[i]\n elif data[i] > max:\n max = data[i]\n \n return max + min\n \n\ndef find_contig_sum(needle, data):\n start, end = 0, 1\n seq = data[start] + data[end]\n\n while seq != needle and end < len(data):\n print(\"Loop one ({}, {}) = {}\".format(start, end, seq))\n if seq > needle:\n print(\"Seq too great, decrease\")\n seq -= data[start]\n start += 1\n elif seq < needle:\n end += 1\n seq += data[end]\n \n if seq == needle:\n print(\"Found: ({}, {}) = {}\".format(start, end, get_max_min_sum(data, start, end)))\n\nfind_contig_sum(14360655, data)\n# find_contig_sum(127, data)\n\n# for i in range(window, len(data)):\n# val = data[i]\n\n# start, end = i - window, i\n# try:\n# find_sum(data, data[i], start, end)\n# except Exception as ex:\n# print(\"Success\", ex)\n# exit" }, { "alpha_fraction": 0.45986393094062805, "alphanum_fraction": 0.4721088409423828, "avg_line_length": 20.647058486938477, "blob_id": "a3120abe20169e6c9b27703c7d16830b810ac093", "content_id": "7c20a21929a7728a54acd401a6236dec14186086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": "no_license", "max_line_length": 55, "num_lines": 34, "path": "/day6/day6sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from typing import List\n\nbase = ord('a')\ndef an_group(g: List[str]):\n letters = [0 for _ in range(26)]\n for person in g:\n for vote in person:\n ind = ord(vote) - base\n letters[ind] += 1\n\n r = map(lambda x: 1 if x == len(g) else 0, letters)\n\n print(\"Parsed {} to 
{}\".format(g, r))\n return r\n\ndef part1():\n count = 0\n group = []\n\n with open('input.txt') as data:\n for line in data:\n line = line.strip()\n print(\"Parsing\", line)\n if line == \"\":\n # End of a group\n count += sum(an_group(group))\n group = []\n else:\n group.append(line)\n\n print(\"Total {}\".format(count))\n\n\npart1()" }, { "alpha_fraction": 0.45356234908103943, "alphanum_fraction": 0.46628499031066895, "avg_line_length": 25.6610164642334, "blob_id": "5cf1d49a04dd7928bef3882266e522aa23a0d5de", "content_id": "c45ec509b23bc505a4b3d3c90ceeb6aea363695b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1572, "license_type": "no_license", "max_line_length": 71, "num_lines": 59, "path": "/day3/day3sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "from functools import reduce\nclass InfinteWidthMap:\n\n def __init__(self, filename):\n self.tree_char = '#'\n self.open_char = '.'\n self._base_map = []\n self.height = 0\n\n with open(filename) as map:\n for row_raw in map:\n if not row_raw:\n print(\"Skipping\", row_raw)\n continue\n row = [m for m in row_raw]\n self.width = len(row)\n self._base_map.append(row)\n \n self.height = len(self._base_map)\n \n def get_position(self, x, y):\n a_x = x % self.width\n\n if y > self.height:\n raise Exception(\"Too far!\")\n\n a_char = self._base_map[y][a_x] \n # print(\"Getting ({}, {} = {})\".format(a_x+1, y+1, a_char))\n return a_char\n \n def is_tree(self, x, y):\n c = self.get_position(x,y)\n return c == self.tree_char\n\n def get_height(self):\n return self.height\n\ndef part1():\n m = InfinteWidthMap('input.txt')\n traverses = [(1,1), (3,1), (5,1), (7,1), (1,2)]\n a_t = []\n for d_x, d_y in traverses:\n tree_count = 0\n x = 0\n rows = range(0, m.get_height(), d_y)[1:]\n print(\"doing rows\",rows)\n for y in rows:\n x += d_x\n if m.is_tree(x, y):\n tree_count += 1\n\n a_t.append(tree_count)\n print(\"You ({}, {}) hit {} trees\".format(d_x, d_y, tree_count))\n\n total = reduce(lambda x,y: x*y, a_t)\n\n print(\"The total of {} is: {}\".format(a_t, total))\n\npart1()" }, { "alpha_fraction": 0.5945611596107483, "alphanum_fraction": 0.6081582307815552, "avg_line_length": 22.14285659790039, "blob_id": "445de71b067587bfcd3efbba35bd441bcaa8b945", "content_id": "9f124ffbc17253c1d39a40fe0c492a7529740682", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 809, "license_type": "no_license", "max_line_length": 78, "num_lines": 35, "path": "/day10/day10sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "lines = []\nwith open(\"input.txt\") as data:\n for line in data:\n lines.append(int(line))\n\n# Add starting value\nlines.append(0)\nlines.sort()\n# Add \"power adapter\" value\nlines.append(lines[-1]+3)\n\n# Memorize how many ways there are to reach the end from each value\ncounts = [0] * len(lines)\ncounts[-1] = 1\n\nfor i in range(0, len(lines)):\n # Start at the last value and work forward\n idx = len(lines) - i - 1\n curr_val = lines[idx]\n\n # Search forward until you find a value that is no longer within 3 jolts, \n # and count how many ways those could reach the end.\n count = 0\n for s_i in range(idx, len(lines)):\n n = lines[s_i]\n\n if n-curr_val <= 3:\n count += counts[s_i]\n else:\n break\n \n counts[idx] = count\n\nprint(lines)\nprint(counts)" }, { "alpha_fraction": 0.5077444314956665, "alphanum_fraction": 0.5164569020271301, "avg_line_length": 28.098590850830078, 
"blob_id": "a5b5f3ee0bc9c4dea666653fcef8d61d04ff2f26", "content_id": "4a3186d1f198283c1bfd773c5e60f5ed5b9d438d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2066, "license_type": "no_license", "max_line_length": 110, "num_lines": 71, "path": "/day7/day7sol.py", "repo_name": "tonyferrell/aoc2020", "src_encoding": "UTF-8", "text": "import re\nall_outer = {\"shiny gold bag\" : 1}\n\nno_count = re.compile(\"(\\d+\\s)?(\\D+ bag)s?\\W*\")\n\nbags = {}\ndef clean_bag(bag, count = False):\n bag = bag.strip()\n m = no_count.match(bag)\n if no_count:\n if not count:\n return m.group(2)\n else:\n count = int(m.group(1)) if m.group(1) is not None else 0\n return m.group(2), count\n else:\n return bag\n\nwith open('input.txt') as data:\n for line in data:\n outer, inner = line.split(\" contain \")\n ib = inner.split(', ')\n bags[clean_bag(outer)] = list(map(lambda x: clean_bag(x, count=True), ib))\n\ndef can_hold(needle, haystack, top):\n print(\"Looking for\", needle)\n if needle in haystack:\n contained_by = haystack.pop(needle)\n for n1 in contained_by:\n if not n1 in top:\n top[n1] = 0\n top[n1] += 1\n can_hold(n1, haystack, top)\n\n# for b, c in bags.items():\n# print(\"{} => {}\".format(b,c))\n\nseen = {}\ndef must_hold(bag, rules, idet = 0):\n verbose = True\n left = idet * \"-\"\n if bag in seen:\n if verbose:\n print(left + \"Cached {} == {}!\".format(bag, seen[bag]))\n return seen[bag]\n\n if not bag in rules: # Should only be \"no other\"\n # if verbose:\n # print(left + \"{} contains nothing x 1\".format(bag))\n return 0\n else:\n # Figure out how many this bag contains\n in_this_bag = 0\n\n for b, count in rules[bag]:\n if verbose:\n print(left + \"{} contains {} x {}\".format(bag, b, count))\n\n bags_in_child = must_hold(b, rules, idet + 4)\n this_contains = (count * bags_in_child) + count \n seen[b] = bags_in_child\n\n in_this_bag += this_contains\n if verbose:\n print(left + \"{} contains {} * {} = {} bags\".format(bag, count, bags_in_child, this_contains))\n \n return in_this_bag\n\nprint(\"Res: {}\".format(must_hold('shiny gold bag', bags)))\nfor k, v in seen.items():\n print(k, \"==>\", v)\n" } ]
17
6306022610113/INEPython
https://github.com/6306022610113/INEPython
f2f00a0989c1699836df35e9f08508ca954196da
e3a349475b1ea975a09dec314d2c3b080c53f517
f0b3d8fb6d49b5ac5155d37c27cfde037e7b6a49
refs/heads/master
2023-02-14T20:46:46.188441
2021-01-12T06:19:37
2021-01-12T06:19:37
281,369,388
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.64462810754776, "alphanum_fraction": 0.6900826692581177, "avg_line_length": 26, "blob_id": "b9f8b19f921ac99db860d517eec5d7fb6832645c", "content_id": "13d1f56124568edcf298e8f8a7969b3ccc7742cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 34, "num_lines": 9, "path": "/week5/basic_str_met2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#create a string with a date.\ndate_string = '11/26/2014'\n#split the date.\ndate_list = date_string.split('/')\n#Display each piece of the date.\nprint('Month:',date_list[0])\nprint('Day:',date_list[1])\nprint('Year:',date_list[2])\nprint(date_list)" }, { "alpha_fraction": 0.3614457845687866, "alphanum_fraction": 0.4397590458393097, "avg_line_length": 19.875, "blob_id": "6b4c8f73b68a67c0c80a5e0069e0f9d315d4b3a4", "content_id": "ac941598930b0dc655fb7cf5fadcf0aeb2cf1c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 29, "num_lines": 8, "path": "/week4/exercise1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(\"---------------\")\nprint(\"KPH \\t MPH\")\nprint(\"---------------\")\n\nfor kph in range (60,140,10):\n mph = kph*0.6214\n print(kph,\"\\t\",\\\n format(mph,',.1f'))" }, { "alpha_fraction": 0.6470588445663452, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 17.600000381469727, "blob_id": "222b254915ce6ae53762a13a3dbd9c58cc9846fc", "content_id": "f8e99432e89ceffc848bc46d24449a9f73a38502", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/test3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "# try block to handle the exception \ntry: \n\tmy_list = [] \n\t\n\twhile True: \n\t\tmy_list.append(int(input())) \n\t\t\n# if the input is not-integer, just print the list \nexcept: \n\tprint(my_list) \n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5166666507720947, "avg_line_length": 19.33333396911621, "blob_id": "b6d1e27c8b1c7028d2ce3b3d3f126ea79b8d51ec", "content_id": "4a1db1476450a3db9062b9b116e8c4155d178fc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/week4/for_loop3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "for i in range(4):\n for j in range(i):\n print(i,j)" }, { "alpha_fraction": 0.5552763938903809, "alphanum_fraction": 0.5552763938903809, "avg_line_length": 27.35714340209961, "blob_id": "6819244eb48d4da299c97ae95dcdd1424b30fa06", "content_id": "1398d80db0688e25c0570985c7db03c10b06e466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 398, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/6306022610113/EX_4.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "\nkeep = 'y'\n\nwhile keep == 'Y' or keep == 'y':\n tf = input('Select topiglatin(T) or Frompiglatin(F) : ')\n sen = input('Input sentence: ')\n\n if tf == 'T' :\n print(\"Results traslation :\".join(topiglatin(sen)))\n elif tf == 'F' :\n print(\"Results traslation :\".join(frompiglatin(sen)))\n else :\n 
print(\"ERROR\")\n\n keep = print(\"Any more sentence Y(yes) or N(no) :\")\n" }, { "alpha_fraction": 0.6746031641960144, "alphanum_fraction": 0.6746031641960144, "avg_line_length": 17, "blob_id": "8977bfa3fe40d96c0625af0a823cb3281b29218c", "content_id": "eabdcc513115349a7e2d79ccb2e747e74d090302", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/week2/comment.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "\"\"\"\n This is the comments\n Python interpreter igone\n All the text here\n\"\"\"\n#This also Comment\nprint('Hello Comment')\n" }, { "alpha_fraction": 0.4291498064994812, "alphanum_fraction": 0.43967610597610474, "avg_line_length": 26.44444465637207, "blob_id": "feae580e423c745a4aaca22565eb9404e4d4e19f", "content_id": "d11b34aeaee190899fc87ef66335c087fa641cfc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 69, "num_lines": 45, "path": "/week7/exercise1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "\nshow_name_all = 1\nadd_name = 2\ndelete_name = 3\nend = 4\n\ndef main():\n choice = 0\n try:\n while choice != end:\n show_menu()\n choice = int(input('Enter your choice : '))\n if choice == show_name_all:\n infile = open('week7/nameclass.txt','r')\n r = infile.read()\n infile.close()\n print(r)\n elif choice == add_name:\n infile = open('week7/nameclass.txt','a')\n infile.write('\\n'+(str(input(' Enter your name: '))))\n infile.close()\n print('your data add list')\n elif choice == delete_name:\n infile = open('week7/nameclass.txt','r')\n r = infile.read()\n for count in r:\n g += 1 \n \n infile.close()\n\n elif choice == end :\n print('Exiting the program...')\n else:\n print('Error : invalid selection.')\n except Exception as err:\n print(err)\n\n \ndef show_menu():\n print(' MENU')\n print('1) Show Name All')\n print('2) Add Name')\n print('3) Delete Name')\n print('4) Quit')\n\nmain()" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 18.66666603088379, "blob_id": "e842eb149499da5219dd986001f0af3b1e42bf27", "content_id": "81eb30726af81935f4da847e1201f052211fc295", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 38, "num_lines": 15, "path": "/week6/function1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#This program has two funtions.\n#First we define the main funtion.\n\ndef main():\n print('I have a message for you.')\n message()\n print('Goodbye!')\n\n#Next we define the message funtion\ndef message():\n print('I am Anirach,')\n print('I Love Python.')\n\n#Call the main function.\nmain()\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.800000011920929, "avg_line_length": 18.5, "blob_id": "e706dcad31a75ca404c7335307d2745f22cceb7b", "content_id": "4f5a70d6282be9dbc70180d13748b53856dd2992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 40, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/week2/README.md", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "# Newproject01\nmy first project GitHub \n" }, { "alpha_fraction": 0.5573770403862, "alphanum_fraction": 
0.5901639461517334, "avg_line_length": 23.66666603088379, "blob_id": "2013db0e142834111d940ce9ef38acf0f5270494", "content_id": "d2f15f59ffaf455edb54453aa43c64d8cfe31981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 305, "license_type": "no_license", "max_line_length": 56, "num_lines": 12, "path": "/week3/exercise2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "hours = int(input('Enter the number of hours worked: '))\nrate = int(input('Enter the hourly pay rate: '))\n\n\nif hours > 40:\n ohour = hours-40\n pay = 40*rate\n pay2 = (rate*1.5)*ohour+pay\n print('the gross pay is $',pay2)\nelse :\n pay = hours*rate \n print('The gross pay is $',pay)\n\n\n\n\n \n" }, { "alpha_fraction": 0.7215189933776855, "alphanum_fraction": 0.7215189933776855, "avg_line_length": 23.75, "blob_id": "33d8f591d89e3689b5782553d1ea13da81269537", "content_id": "707913536274d6f539dc372451d252a7d00c34e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/week5/exam_hints.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "inputmsg = input(\"In put string you want to swap words: \")\nprint(inputmsg.split())\nseparatewords = inputmsg.split()\n\nprint(\"separate words\")\nfor i in separatewords:\n print(i)\n\nprint(\"Modify each words\")\nprint(len(separatewords))\nfor j in range(len(separatewords)):\n separatewords[j] = 'G' + separatewords[j]\n print(separatewords[j])\n\nprint(separatewords)\nprint(\" \".join(separatewords),)" }, { "alpha_fraction": 0.6091954112052917, "alphanum_fraction": 0.6149425506591797, "avg_line_length": 20.875, "blob_id": "af2ea34eeda2ab74ba5df2697258c7fd7aef43a6", "content_id": "4a9ccfaa694403bd6a27402b2ad1fe6e417bf991", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 59, "num_lines": 8, "path": "/week8/writelines.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\n cities = ['New York','\\nBoston','\\nAtlanta','\\nDallas']\n\n outfile = open('week8/cities.txt','w')\n outfile.writelines(cities)\n outfile.close()\n\nmain()" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 17.5, "blob_id": "9d036c17f26266714dea847ac2c9992a47d81b86", "content_id": "b590494445325cf4e35037776e2a9a5cf63e3f56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/week5/upper.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "fruit = 'Apple'\nprint(fruit.upper())" }, { "alpha_fraction": 0.7225806713104248, "alphanum_fraction": 0.7290322780609131, "avg_line_length": 32.28571319580078, "blob_id": "f9644f76cd278ad6497641aeed580d165ec4b708", "content_id": "e743137713f9a651e1f3c0debe273a4c1cf3607e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 44, "num_lines": 14, "path": "/week4/calculating_run.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#This program calculates the sum of a series\n# of number entered by the user.\nmax = 5 #The maximum number\n#Initialize 
an accumulatir variable.\ntotal = 0.0\n#Explain what we are doing.\nprint('This program calculates the sum of ')\nprint(max,'numbers you will enter.')\n#Get the numbers and accumulate them.\nfor counter in range(max):\n number = int(input('Enter a number : '))\n total = total + number\n#Display the total of the numbers.\nprint('The total is', total)" }, { "alpha_fraction": 0.6208053827285767, "alphanum_fraction": 0.6610738039016724, "avg_line_length": 32.22222137451172, "blob_id": "679f9faae335ef49b55723ac6039934b0f28409f", "content_id": "c066a96370485cde1c94b16aef2354790f1f2b69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/week3/congra.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "test1 = int(input('Enter the score for test 1: '))\ntest2 = int(input('Enter the score for test 2: '))\ntest3 = int(input('Enter the score for test 3: '))\nave = ( test1+test2+test3 )/3\nprint('The average score is',ave)\n\nif ave > 95:\n print('Congratulations!')\n print('That is a great average!')" }, { "alpha_fraction": 0.47887325286865234, "alphanum_fraction": 0.4816901385784149, "avg_line_length": 15.75, "blob_id": "69eba36e280255837e05f3b378aedbfe64c5ac42", "content_id": "1f50dab936720a471055edfee97f936256246b4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 65, "num_lines": 20, "path": "/week7/try_except_finally.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\r\n try:\r\n a,b = map(int, input(\"Input 2 integer values: \").split())\r\n x = divide(a,b)\r\n \r\n except Exception as err:\r\n print(err)\r\n\r\n else:\r\n print(\"result is\",x)\r\n \r\n finally:\r\n print(\"executing finally clause\")\r\n\r\ndef divide(x,y):\r\n result = x/y\r\n return result \r\n \r\n\r\nmain()\r\n" }, { "alpha_fraction": 0.6321243643760681, "alphanum_fraction": 0.6683937907218933, "avg_line_length": 23.1875, "blob_id": "7e2cbe381bd7fda0009387d8eda69c042cf2f90e", "content_id": "de3a0e64e339e94b9634fc634559ad764cc6b9c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 50, "num_lines": 16, "path": "/week6/pass_argu.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#this program demonstra tes a funtion that accepts\n#two arguments.\ndef main():\n print('The sum of 12 and 45 is')\n num1 = int(input('number 1 : '))\n num2 = int(input('number 2 : '))\n show_sum(num1,num2)\n\n#The show_sum function accepts two arguments\n#and displays their sum.\ndef show_sum(num1,num2):\n result = num1 + num2\n print(result)\n\n# call the main function.\nmain()" }, { "alpha_fraction": 0.6520000100135803, "alphanum_fraction": 0.699999988079071, "avg_line_length": 40.66666793823242, "blob_id": "e9d080e6a08929453f882a3b0075bf8f0537a1ff", "content_id": "61582924897b6580382c362d90211dfc00cdcef5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 77, "num_lines": 6, "path": "/6306022610113/EX_1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "male = int(input('Input number of male students: '))\nfemale = int(input('Input number of female students: 
'))\nsum = male + female\nsum1 = male/sum*100\nsum2 = female/sum*100\nprint('There are %d students with %.2f male and %.2f famale'%(sum,sum1,sum2))\n" }, { "alpha_fraction": 0.446153849363327, "alphanum_fraction": 0.5076923370361328, "avg_line_length": 20.66666603088379, "blob_id": "b0f1a6a86ed6ea3172934f9421daf7c2bb75ee60", "content_id": "ebe4a80c9150b642310ba77ac206e8cf91a581c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/week4/for_loop2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "for i in range(1,3):\n for j in range(2,5):\n print(i,j)\n" }, { "alpha_fraction": 0.7121418714523315, "alphanum_fraction": 0.7162346243858337, "avg_line_length": 42.17647171020508, "blob_id": "3c4bc53881bf0ab8ffec0ffc4b798298da326289", "content_id": "e36767a44628fda4950f298bdcbb72d5562168aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 733, "license_type": "no_license", "max_line_length": 56, "num_lines": 17, "path": "/week4/repetition.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#Get a salesperson's sales and commission rate.\nsales = float(input('Enter the amount of sales: '))\ncomm_rate = float(input('Enter the commission rate: '))\ncommission = sales * comm_rate\nprint('The commission is $', format(commission, ',.2f'))\n\n#Get a salesperson's sales and commission rate.\nsales = float(input('Enter the amount of sales: '))\ncomm_rate = float(input('Enter the commission rate: '))\ncommission = sales * comm_rate\nprint('The commission is $', format(commission, ',.2f'))\n\n#Get a salesperson's sales and commission rate.\nsales = float(input('Enter the amount of sales: '))\ncomm_rate = float(input('Enter the commission rate: '))\ncommission = sales * comm_rate\nprint('The commission is $', format(commission, ',.2f'))" }, { "alpha_fraction": 0.582608699798584, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 19.090909957885742, "blob_id": "363f139bf7f5835246ec7f47111b9777e4385d2e", "content_id": "fb8f88e77746abec7ee864047abaab64cc142503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 48, "num_lines": 11, "path": "/week7/filewrite.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\r\n #Open a file named philosophers.txt\r\n outfile = open('week7/philosophers.txt','w')\r\n\r\n outfile.write('John Locke\\n')\r\n outfile.write('David Hume\\n')\r\n outfile.write('Edmund Burke\\n')\r\n\r\n outfile.close()\r\n\r\nmain()" }, { "alpha_fraction": 0.5760233998298645, "alphanum_fraction": 0.6228070259094238, "avg_line_length": 14.285714149475098, "blob_id": "3a1f3ac280aaeb11c02ad5c7aa22d1effa026cb5", "content_id": "b379add83f7f4c9667f82d3dfddc8aa8de7d11a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 30, "num_lines": 21, "path": "/week9/setintro.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "setA = {1,2,3,4}\r\nsetB = set([8,9,10])\r\nsetA.add(5)\r\nsetB.update([6,7])\r\nUset = setA | setB\r\nprint(Uset)\r\nprint(len(Uset))\r\n\r\nsetB.update('ABCD')\r\nsetA.update([6,7,8])\r\nprint(setB)\r\n\r\nprint(setA.intersection(setB))\r\nprint(setA ^ 
setB)\r\n\r\nsetB.remove('B')\r\nsetB.discard(10)\r\nprint(setB)\r\nprint(setA.clear())\r\nfor val in Uset:\r\n print(val)\r\n" }, { "alpha_fraction": 0.5178316831588745, "alphanum_fraction": 0.5221112966537476, "avg_line_length": 27.29166603088379, "blob_id": "c865c466276e78e519d5fceca67c639d6e08b64a", "content_id": "d0cb71aaa92ca1f6e9da4fe56ccc56bbcb378f9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 59, "num_lines": 24, "path": "/week7/save_emp_records.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\r\n #get the number of employee records to create.\r\n num_emps = int(input('How many employee records' + \\\r\n 'do you want to create? '))\r\n \r\n emp_file = open('week7/employees.txt','w')\r\n\r\n for count in range(1, num_emps + 1):\r\n\r\n print('Enter dete for employees #' , count, sep='')\r\n name = input('Name: ') \r\n id_num = input('ID number: ')\r\n dept = input('Department: ')\r\n\r\n emp_file.write('Name: ',name + '\\n')\r\n emp_file.write('ID: ',id_num + '\\n')\r\n emp_file.write('Department: ',dept + '\\n')\r\n\r\n print()\r\n\r\n emp_file.close()\r\n print('Employees records written to employees.txt')\r\n\r\nmain()" }, { "alpha_fraction": 0.4313725531101227, "alphanum_fraction": 0.4313725531101227, "avg_line_length": 50.5, "blob_id": "2498ee060e82b9d5f51b4cfefcd5e8869e4abde6", "content_id": "f7cb3e3666e754187af3fb322091321cf1c158e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 56, "num_lines": 2, "path": "/week5/format_str.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(' I {} Python. 
' .format(' love '))\nprint( ' {} {} {} ' .format('I', 'love', 'Python.'))" }, { "alpha_fraction": 0.6390243768692017, "alphanum_fraction": 0.6536585092544556, "avg_line_length": 33.16666793823242, "blob_id": "84bb5f3298ebec22222d64eb79e2d4c1ca202672", "content_id": "2794fe8f6aa5a6cf157873bdcade98e78f7ecf0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 46, "num_lines": 6, "path": "/week5/exercise.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "fname = input('ENTER YOUR FIRST NAME : ')\nlname = input('ENTER YOUR LAST NAME : ')\nnum = input('ENTER YOUR STUDENT ID NUMBER : ')\n\nprint('YOUR SYSTEM LOGIN NAME IS : ')\nprint(fname[:3]+lname[:3]+num[-3:])\n" }, { "alpha_fraction": 0.5632184147834778, "alphanum_fraction": 0.5881226062774658, "avg_line_length": 21.7391300201416, "blob_id": "a60b7449c027bd4318a8b62177a110f5b32c04ac", "content_id": "d9e61da0af77adabd99f1d4591289ce9277fbd84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 522, "license_type": "no_license", "max_line_length": 47, "num_lines": 23, "path": "/week4/while2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "import random \n\nprint(\"What is my mad]gic number (1 or 100) ?\")\nmynumber = random.randint(1,100)\nntries = 1\nyourguess = -1\nprint(mynumber)\nwhile ntries < 7 and mynumber != yourguess :\n msg = str(ntries) + \">> \"\n if(ntries == 6) :\n print(\"Your last\")\n yourguess = int(input(msg))\n\n if mynumber < yourguess :\n print(\"--> too high\")\n else :\n print(\"--> too low\")\n ntries += 1\n\nif mynumber == yourguess:\n print(\"Yes! it's\" ,mynumber)\nelse :\n print(\"Sorry! 
my number is\" , mynumber)" }, { "alpha_fraction": 0.4861963093280792, "alphanum_fraction": 0.5, "avg_line_length": 26.16666603088379, "blob_id": "ef33746a7c3d55b1befb2a4b682aea913e784cbb", "content_id": "cb66747d357f8571ab3378e083a1cf78e847073a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 45, "num_lines": 24, "path": "/6306022610113/EX_3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "\ndef massage():\n encode = input('Input Encode Message : ')\n while encode.endswith:\n if encode.endswith('ABD'):\n return 2 \n elif encode.endswith('DEF') :\n return 3 \n elif encode.endswith('GHI'):\n return 4 \n elif encode.endswith('JKL'):\n return 5\n elif encode.endswith('MNO'):\n return 6\n elif encode.endswith('PQRS'):\n return 7\n elif encode.endswith('TUV') :\n return 8\n elif encode.endswith('WXYZ'):\n return 9\n elif encode == ' ':\n return 1\n\n print('The message is : ',sum)\nmassage()" }, { "alpha_fraction": 0.6534653306007385, "alphanum_fraction": 0.6732673048973083, "avg_line_length": 33, "blob_id": "8eab3804fbe23090a178dec794b9bcb2b76c3f64", "content_id": "45a695632d999b75968b69f460239e554ec29cc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 47, "num_lines": 3, "path": "/week4/count_loop.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print('I will display the number 1 through 5.')\nfor name in['Black','Yellow','Blue']:\n print(name)" }, { "alpha_fraction": 0.5281173586845398, "alphanum_fraction": 0.5281173586845398, "avg_line_length": 28.285715103149414, "blob_id": "ecb7bca5f1d532f7f6f84a7d64f9674abbc85a4d", "content_id": "fd68cee239f8921ebbe3dc38def8df3367d20968", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 64, "num_lines": 14, "path": "/week5/test2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\n \n keep = 'y'\n while keep == 'y' or keep == 'Y':\n ft = print(\"Select topiglatin(T) of frompiglatin(F) : \")\n t = input('Input sentence: ')\n if ft_topfrom == 'T':\n print('Results translation: ',t.topiglatin())\n elif ft_topfrom == 'F':\n print('Results translation: ',t.frompiglation())\n print(\"Any more sentence : \")\n return ft\n\nmain()" }, { "alpha_fraction": 0.6689189076423645, "alphanum_fraction": 0.6689189076423645, "avg_line_length": 28.600000381469727, "blob_id": "8d377006b591215c9367ea6146008fdaef6ae2bb", "content_id": "402c79cfe05e7e41de5c9275aa07f680c81c9956", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/week5/basic_str_met.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "balloon = \"Anirach has a balloon.\"\n\nprint(\" \".join(balloon))\nprint(\"\".join(reversed(balloon)))\nprint(\",\".join([\"sharks\",\"crustaceans\",\"plankton\"]))\n" }, { "alpha_fraction": 0.5837838053703308, "alphanum_fraction": 0.5837838053703308, "avg_line_length": 24.85714340209961, "blob_id": "1565bf6b0cd97677cfd296efb9d7300105339f22", "content_id": "d5cb574460e9872482591482e1b227b38085cb74", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 185, "license_type": "no_license", "max_line_length": 44, "num_lines": 7, "path": "/week4/exercise2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "num = int(input('Enter number : '))\ncolumn = int(input('Enter number column: '))\n\nfor i in range(column):\n for number in range(num):\n print(number,\" \",end=\"\")\n print()\n " }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5542168617248535, "avg_line_length": 24, "blob_id": "a8e0913fb16766814af214db7ee2a8b8a5dda9e4", "content_id": "6388bb3b60eb0a8fcf7869baedc621561570abf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/week5/basic_str_met3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "balloon = ' Anirach has a balloon '\n\nprint('[',balloon.strip(),']')\nprint('[',balloon.lstrip(),']')\nprint('[',balloon.rstrip(),']')\n\nballoon = '###Anirach has a balloon####'\n\nprint('[',balloon.strip('#'),']')\nprint('[',balloon.rstrip('#'),']')" }, { "alpha_fraction": 0.7283950448036194, "alphanum_fraction": 0.7283950448036194, "avg_line_length": 19.5, "blob_id": "adbc7076149b36b752edbde9baab04bed4f6da37", "content_id": "9e5dfdf3367c2cf9694f6db648b87aea9ee159fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 34, "num_lines": 4, "path": "/week5/basic_str_met1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "balloon = \"Anirach has a balloon.\"\n\nprint(balloon.split())\nprint(balloon.split())" }, { "alpha_fraction": 0.5262467265129089, "alphanum_fraction": 0.5524934530258179, "avg_line_length": 24.433332443237305, "blob_id": "09b7375dc9669b2c69fa512e100ed6c6f8dd39e9", "content_id": "9464436087971c34f9428300b581d69ffe7f361a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/week6/value_return1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\n first_name,last_name = get_name()\n print('First name : ',first_name, 'Last name : ',last_name)\n print(password2(first_name,last_name))\n\ndef get_name():\n #Get the user's first and last names.\n first = input('Enter your first name : ')\n last = input('Enter your last name : ')\n #Return both names.\n return first,last\n\ndef password(first,last):\n d = len(last)//2\n if len(last) % 2 == 0 :\n print('password : ',first[:3:]+last[d-2:d+1])\n else :\n print('password : ',first[:3:]+last[d-1:d+2])\n\ndef password2(first,last):\n d = len(last)//2\n if len(last) % 2 == 0 :\n pas = first[:3:]+last[d-2:d+1]\n else :\n pas = first[:3:]+last[d-1:d+2]\n return 'password : ' + pas \n \n\n \nmain()" }, { "alpha_fraction": 0.5070175528526306, "alphanum_fraction": 0.5736842155456543, "avg_line_length": 24.863636016845703, "blob_id": "1b2aaea5584e705f80194ac7dc5232631ce9e36d", "content_id": "588068206663b0efb4954dd707efd92f5e4c5d5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 570, "license_type": "no_license", "max_line_length": 53, "num_lines": 22, "path": "/week3/exercise4.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(\"Please select operation -\")\nprint(\"1. 
Add\")\nprint(\"2. Subtract\")\nprint(\"3. Multiply\")\nprint(\"4. Divide\")\nnum = int(input(\"Select operations form 1,2,3,4 : \"))\nnum1 = int(input(\"Enter first number : \"))\nnum2 = int(input(\"Enter second number : \"))\nif num == 1 :\n num3 = num1+num2\n print(num1,\"+\",num2,\"=\" ,num3)\nelif num == 2 :\n num3 = num1-num2\n print(num1,\"-\",num2,\"=\" ,num3) \nelif num == 3 :\n num3 = num1*num2\n print(num1,\"*\",num2,\"=\" ,num3)\nelif num == 4 :\n num3 = num1/num2\n print(num1,\"/\",num2,\"=\" ,num3)\nelse :\n print(\"ERROR\")\n\n" }, { "alpha_fraction": 0.725806474685669, "alphanum_fraction": 0.725806474685669, "avg_line_length": 30, "blob_id": "e5707dae465687456f4601f6ebfe17b5ee6e56d0", "content_id": "a7abad01f4296427abb2b0303e68240c7b8a6045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/week5/basic_str_met4.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "balloon = 'Anirach has a balloon'\nprint(balloon.find(\"rach\"))\n" }, { "alpha_fraction": 0.37142857909202576, "alphanum_fraction": 0.48571428656578064, "avg_line_length": 16.25, "blob_id": "b9ed6b2c48120e614e02b006374bff08c7713b69", "content_id": "e955560df560cd9cc4218e7823ff2142016a1abe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 26, "num_lines": 4, "path": "/week5/exercise3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "s1 = 'Ault'\ns2 = \"Kelly\"\nd = len(s1) // 2\nprint(s1[0:d:]+s2+s1[d::])\n\n" }, { "alpha_fraction": 0.7090908885002136, "alphanum_fraction": 0.7090908885002136, "avg_line_length": 17.66666603088379, "blob_id": "35d4eadf4197828aeffb3799875fdcbe639c9f44", "content_id": "915f2726305fabd314c674382018cd9f90434bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 22, "num_lines": 3, "path": "/week5/length_function.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "fruit = 'apple'\nfruit_len = len(fruit)\nprint(fruit_len)" }, { "alpha_fraction": 0.6699507236480713, "alphanum_fraction": 0.674876868724823, "avg_line_length": 32.83333206176758, "blob_id": "1b241465d505afd9b8a5c552e029d7e90054e87e", "content_id": "50137519f4ddd912c0d9b05cd60eeb8d236657fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 203, "license_type": "no_license", "max_line_length": 57, "num_lines": 6, "path": "/week5/basic_str_met5.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "string = 'Four score and seven years ago'\nposition = string.find('seven')\nif position != -1 :\n print('The word \"seven\" was found at index',position)\nelse:\n print('The word \"seven\" was not found.')\n" }, { "alpha_fraction": 0.38906753063201904, "alphanum_fraction": 0.45016077160835266, "avg_line_length": 17.352941513061523, "blob_id": "45d756ddc2eaa16bf192d4a726ae631dbcd70b02", "content_id": "336b2994eaf298d1121b9e1756fc785e78825430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 24, "num_lines": 17, "path": "/week2/num.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "x = 
15\ny = 4\n\n#Output: x+y = 19\nprint('x + y = ' ,x+y)\n#Output: x-y = 11\nprint('x - y = ' ,x-y)\n#Output: x*y = 60\nprint('x * y = ' ,x*y)\n#Output: x/y = 3.75\nprint('x / y = ' ,x/y)\n#Output: x%y = 3\nprint('x % y = ' ,x%y)\n#Output: x//y = 3\nprint('x // y = ' ,x//y)\n#Output: x**y = 50625 \nprint('x ** y = ', x**y)" }, { "alpha_fraction": 0.5492957830429077, "alphanum_fraction": 0.5633803009986877, "avg_line_length": 17, "blob_id": "dc4244d49ff51323e522bdcf45a3aedeb920e464", "content_id": "fa9b09f65139fca4510a29cf69d647f6192bb5d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/week5/exercise1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "ss = input('Seach name : ')\ncc = ss.find('ball')\n\nprint('',ss[cc:cc+6])" }, { "alpha_fraction": 0.34302327036857605, "alphanum_fraction": 0.47093021869659424, "avg_line_length": 56.66666793823242, "blob_id": "3e65270b09a459d342a28f4877d44a86c78f764e", "content_id": "16cfb81fd38d85f501c8bcdd29a7366c50e4648c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 58, "num_lines": 3, "path": "/week5/format1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(' {0:8} | {1:>8} ' .format('Fruit' , 'Quantity'))\nprint(' {0:8} | {1:>8.2f} ' .format('Apple' , 2.33333))\nprint(' {0:8} | {1:>8.2f} ' .format('Orange' , 10))" }, { "alpha_fraction": 0.6904761791229248, "alphanum_fraction": 0.6904761791229248, "avg_line_length": 37.08333206176758, "blob_id": "fe334b5c41d9a459cdcb9efb91fe71054bb472b9", "content_id": "5e5e670372f53d7a66a768d4235488c91dc2a19a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/week5/exercise2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "string = input('ENTER A STRING : ')\nprint('This is what I found about that string :')\nif string.isalnum() :\n print('The string is alphanumeric.')\nif string.isalpha() :\n print('The string contains only alphabetic charaters.')\nif string.isdigit() :\n print('The string contains only digits.')\nif string.islower() :\n print('The letter in the string are all lowercase.')\nif string.isupper() :\n print('The letter in the string are all uppercase.')\n\n\n\n\n\n" }, { "alpha_fraction": 0.3597733676433563, "alphanum_fraction": 0.3767705261707306, "avg_line_length": 21.125, "blob_id": "cfb82aee20d852c3ffa5bc7b64490c00b6937ce9", "content_id": "98e9789bb8b260cf26faca04f2ffc22cbe871b6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 353, "license_type": "no_license", "max_line_length": 37, "num_lines": 16, "path": "/2/63/nos.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\n lstn = input(\"YOUR LIST : \")\n list = [lstn.split(\",\")]\n print(lstn)\n for i in range(len(lstn)-1,0,-1):\n for j in range(i):\n if lstn[j]>lstn[j+1]:\n tmp = lstn[j]\n lstn[j] = lstn[j+1]\n lstn[j+1] = tmp\n print(lstn)\n \n\n \n\nmain()" }, { "alpha_fraction": 0.568965494632721, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 28.5, "blob_id": "c5e7e6f76b16e869c87319c5368d4a25f3fe647a", "content_id": 
"ad410452ebfda026cb4b4a7641dc826dc7d61403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/week5/str_using.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "version = 3\nprint('I love Python '+ str(version) +'.')" }, { "alpha_fraction": 0.48051947355270386, "alphanum_fraction": 0.5454545617103577, "avg_line_length": 12, "blob_id": "beea0c25bca458e482cd667f2ce906dd7f725df3", "content_id": "62e14a10a9e869549227977549898f59b5353741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 77, "license_type": "no_license", "max_line_length": 25, "num_lines": 6, "path": "/week5/str_con1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print('-' * 20)\n\nsupernice = ' Nice ' * 3\nprint(supernice)\n\nprint( '-' * 20)" }, { "alpha_fraction": 0.43832284212112427, "alphanum_fraction": 0.4841156303882599, "avg_line_length": 28.991416931152344, "blob_id": "c9a9ba7e8b48b55a16d5551fb37cca62596221cf", "content_id": "a83ff05c1b5f13265c3e7548949adcf976da817d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6988, "license_type": "no_license", "max_line_length": 110, "num_lines": 233, "path": "/heart/code.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "import pgzrun\nfrom random import randint, choice,random\nimport string\nfrom vocabulary import *\nWIDTH = 1000\nHEIGHT = 600\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n# LETTER = {\"letter\": \"\", \"x\": 0, \"y\": 0}\nON_SCREEN_LETTERS = []\n\nVELOCITY = 5\nSCORE = {\"CORRECT\": 0, \"WRONG\": 0}\ninter = 0\nstop = 0\npu2 = 0\nb = 0\nend = 0\nreply = 0\nplay = Actor('play')\nplay.pos = (500,300)\npu = Actor('pause')\npu.pos = (100,90)\nbackpuu = Actor('backpu2')\nstart2 = Actor('start2')\nendback = Actor('endback')\nendgame = Actor('end')\nreplaygame = Actor('replay')\nresumegame = Actor('resume')\nbackpu = Actor('backpu')\neazy = Actor('eazy')\neazy.pos = (250,200)\nmedium = Actor('medium')\nmedium.pos = (770,200)\nhard = Actor('hard')\nhard.pos = (540,500)\nplayagain = Actor('playagain')\nplayagain.pos = (540,500)\nplayagain2 = 0\neazy2 = 0\nmedium2 = 0\nhard2 = 0\nkeyin = ''\n\nif inter <= 0:\n music.play('manu')\n\ndef draw():\n global pu2,end\n screen.blit('start2',(0,0))\n play.draw()\n if inter == 1:\n screen.clear()\n screen.blit('backpu',(0,0))\n eazy.draw()\n medium.draw()\n hard.draw()\n if eazy2 == 1 or medium2 == 1 or hard2 == 1:\n screen.clear()\n screen.blit('blackground',(0,0))\n pu.draw()\n for LETTER in ON_SCREEN_LETTERS:\n screen.draw.text(LETTER[\"letter\"], (LETTER[\"x\"], LETTER[\"y\"]), fontsize=50, color=WHITE)\n screen.draw.text(\"CORRECT: \" + str(SCORE[\"CORRECT\"]), (WIDTH - 140, 10), fontsize=30, color=WHITE)\n screen.draw.text(\"WRONG: \" + str(SCORE[\"WRONG\"]), (WIDTH - 130, 40), fontsize=30, color=WHITE)\n screen.draw.text(\"TEXT: \" + str(keyin), (200, HEIGHT - 50), fontsize=30, color=WHITE)\n if pu2 == 1:\n screen.clear()\n screen.blit('backpu2',(0,0))\n resumegame.draw()\n resumegame.pos = 540,100\n endgame.draw()\n endgame.pos = 540,300\n replaygame.draw()\n replaygame.pos = 540, 500\n if end == 1:\n screen.clear()\n screen.blit('endback',(0,0))\n playagain.draw()\n screen.draw.text(\"CORRECT: \" + str(SCORE[\"CORRECT\"]), (WIDTH - 570, 200), fontsize=50, color=WHITE)\n 
screen.draw.text(\"WRONG: \" + str(SCORE[\"WRONG\"]), (WIDTH - 550, 300), fontsize=50, color=WHITE)\n if SCORE[\"WRONG\"] == 3:\n pu2 = 1\n end = 1\n\n \n\ndef on_mouse_down(pos):\n global inter,stop,pu2,b,eazy2,medium2,hard2,ON_SCREEN_LETTERS,end,playagain2\n if stop != 1 and inter <= 1:\n if play.collidepoint(pos):\n if inter >= 0 and inter < 1:\n print(\"Enjoy!\")\n inter += 1\n music.play('play')\n if eazy2 != 1 and medium2 != 1 and hard2 != 1:\n if eazy.collidepoint(pos):\n if inter == 1 and eazy2 < 1:\n eazy2 += 1\n music.play('play')\n if medium.collidepoint(pos):\n if inter == 1 and medium2 < 1:\n medium2 += 1\n music.play('play')\n if hard.collidepoint(pos):\n if inter == 1 and hard2 < 1:\n hard2 += 1\n music.play('play')\n if inter == 1:\n if pu.collidepoint(pos):\n if pu2 >= 0 and pu2 < 1:\n pu2 += 1\n music.play('stop')\n if pu2 == 1:\n if resumegame.collidepoint(pos):\n if inter == 1 and end != 1:\n pu2 -= 1\n music.play('play')\n if pu2 == 1:\n if replaygame.collidepoint(pos):\n if inter == 1 and end != 1:\n SCORE[\"CORRECT\"] = 0 \n SCORE[\"WRONG\"] = 0\n ON_SCREEN_LETTERS = []\n pu2 -= 1\n music.play('play')\n if pu2 == 1:\n if endgame.collidepoint(pos):\n if end >= 0 and end < 1:\n end += 1\n music.play('end')\n if SCORE[\"WRONG\"] == 3 or end == 1:\n if playagain.collidepoint(pos):\n if playagain2 >= 0 and playagain2 < 1:\n if inter == 1:\n inter = 0\n if end == 1:\n end -= 1\n inter += 1\n if pu2 == 1:\n pu2 -= 1\n if eazy2 == 1:\n eazy2 = 0\n if medium2 == 1:\n medium2 = 0\n if hard2 == 1:\n hard2 = 0\n SCORE[\"CORRECT\"] = 0 \n SCORE[\"WRONG\"] = 0\n ON_SCREEN_LETTERS = []\n music.play('manu')\n\"\"\" \ndef overtime():\n global pu2,end\n end = 1\n pu2 = 1\n music.play('end')\n screen.draw.text(\"time: \" + x, (WIDTH - 130, 40), fontsize=30, color=WHITE)\n\nx = clock.schedule(overtime, 10.0)\n\"\"\"\n \n \n \n\ndef update():\n if inter == 1 and pu2 != 1 and eazy2 == 1:\n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if SCORE[\"WRONG\"] == 3:\n music.play('end')\n while len(ON_SCREEN_LETTERS) < 1:\n add_letter()\n if inter == 1 and pu2 != 1 and medium2 == 1:\n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if SCORE[\"WRONG\"] == 3:\n music.play('end')\n while len(ON_SCREEN_LETTERS) < 1:\n add_letter()\n\n if inter == 1 and pu2 != 1 and hard2 == 1:\n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if SCORE[\"WRONG\"] == 3:\n music.play('end')\n while len(ON_SCREEN_LETTERS) < 1:\n add_letter()\n \n\ndef on_key_down(key, mod, unicode):\n global keyin\n if keyboard.BACKSPACE:\n keyin = keyin[:-1]\n else:\n keyin += unicode\n for i,LETTER in enumerate(ON_SCREEN_LETTERS):\n if LETTER[\"letter\"] == keyin:\n SCORE[\"CORRECT\"] += 1\n delete_letter(i)\n keyin = ''\n return\n\ndef add_letter():\n if inter == 1 and eazy2 == 1:\n letter = choice(string.ascii_letters).lower()\n x = randint(10, WIDTH - 100)\n y = 1\n ON_SCREEN_LETTERS.append({\"letter\": letter, \"x\": x, \"y\": y})\n if inter == 1 and medium2 == 1:\n letter = choice(vocabulary2)\n x = randint(10, WIDTH - 100)\n y = 1\n ON_SCREEN_LETTERS.append({\"letter\": letter, \"x\": x, \"y\": y})\n if inter == 1 and hard2 == 1:\n letter = choice(vocabulary3)\n x = randint(10, WIDTH - 100)\n y = 1\n 
ON_SCREEN_LETTERS.append({\"letter\": letter, \"x\": x, \"y\": y})\n\n\ndef delete_letter(i):\n del ON_SCREEN_LETTERS[i]\npgzrun.go()\n" }, { "alpha_fraction": 0.7534246444702148, "alphanum_fraction": 0.767123281955719, "avg_line_length": 23.66666603088379, "blob_id": "957d0c643a5060ef9f52391010096f7b52b76323", "content_id": "b48163c6b21088e9a458680fe296e08b936238f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "no_license", "max_line_length": 26, "num_lines": 3, "path": "/week2/reservedword.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "first3letters ='ABC'\nfirst_three_letters ='ABC'\nfirstThreeLetters = 'ABC'" }, { "alpha_fraction": 0.5567010045051575, "alphanum_fraction": 0.5824742317199707, "avg_line_length": 19.66666603088379, "blob_id": "18663f74597e769fdd2ab0a03d47cd12d5dfb493", "content_id": "0d4dab67d5d474cdbbac1efda95d153fd76c36ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "no_license", "max_line_length": 45, "num_lines": 18, "path": "/week7/read.numbers.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\r\n\r\n infile = open('week7/numbers.txt','r')\r\n\r\n num1 = int(infile.readline())\r\n num2 = int(infile.readline())\r\n num3 = int(infile.readline())\r\n\r\n infile.close()\r\n\r\n #add the three numbers.\r\n total = num1 + num2 + num3\r\n\r\n #display the number and their total.\r\n print('The number are : ',num1,num2,num3)\r\n print('Their total is : ',total)\r\n\r\nmain()" }, { "alpha_fraction": 0.5369774699211121, "alphanum_fraction": 0.5369774699211121, "avg_line_length": 17.5625, "blob_id": "b61c937c70c3aaf13b249670a9f7e21267a03924", "content_id": "d4a4ddfcdc3cf543017c0626bf4da4911b7e237a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 311, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/week7/display_file.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "def main():\r\n filename = input('Enter a filename: ')\r\n\r\n try:\r\n infile = open(filename,'r')\r\n\r\n contents = infile.read()\r\n\r\n print(contents)\r\n\r\n infile.close()\r\n except IOError:\r\n print('An error occurred trying to read')\r\n print('the file',filename)\r\n\r\nmain()" }, { "alpha_fraction": 0.390625, "alphanum_fraction": 0.453125, "avg_line_length": 64, "blob_id": "3b810a1c1ae91ad767a96f0662546670136b7454", "content_id": "0ec1a94b2e8602f30ad39aedfefc04903f770fde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 64, "num_lines": 1, "path": "/week5/format_str1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(' I {0} {1}. {1} {0}s me. 
' .format('love','Python'))" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5824176073074341, "avg_line_length": 23.727272033691406, "blob_id": "b8edaef8d8d0d6184da6d709e1e9ae8efcc6cd87", "content_id": "263e3fe19abde58c277adebb91e5182c5d59b185", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/week5/exercise4.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "str1 = \"English = 78 Science = 83 Math = 68 History = 65 art = 78\"\nprint(str1.split())\nseparatewords = str1.split()\nsum = 0\nk = 0\nfor i in separatewords:\n if i.isnumeric():\n sum = sum + int(i)\n k = k + 1\nprint(\"sum is : \",sum)\nprint(\"Average is :\",sum/k)\n\n" }, { "alpha_fraction": 0.33568075299263, "alphanum_fraction": 0.4131455421447754, "avg_line_length": 37.818180084228516, "blob_id": "6ea034c498faaef149e63243dae17451aeaf1147", "content_id": "e3a4101ce87d94f266f89072edfcf252584a3f01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 440, "license_type": "no_license", "max_line_length": 59, "num_lines": 11, "path": "/week5/format_str2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(' {0:8} | {1:8} ' .format('Fruit' , 'Quantity'))\nprint(' {0:8} | {1:8} ' .format('Apple' , '3'))\nprint(' {0:8} | {1:8} ' .format('Orange' , '10'))\n\n#print(' {0:8} | {1:>8} ' .format('Fruit' , 'Quantity'))\n#print(' {0:8} | {1:>8} ' .format('Apple' , '3'))\n#print(' {0:8} | {1:>8} ' .format('Orange' , '10'))\n#ผลลัพธ์\n#Fruit | Quantity\n#Apple | 3\n#Orange | 10" }, { "alpha_fraction": 0.39416059851646423, "alphanum_fraction": 0.48175182938575745, "avg_line_length": 21.83333396911621, "blob_id": "7ab3253de5dbdf32d9f2c9d9c1d315d5cf00ac47", "content_id": "dd503c2842ddac6bb2b23ccb2605aa9b4ac17bf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 33, "num_lines": 6, "path": "/week4/for_loop.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print(\"Number\",\"\",\"Square\")\nprint(\"--------------\")\n\nfor num in[1,2,3,4,5,6,7,8,9,10]:\n square = num**2\n print( num ,\"\\t\", square)\n" }, { "alpha_fraction": 0.6944444179534912, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 17.5, "blob_id": "8e987d0455867086dc4f4467dae76f1cfaf37358", "content_id": "6b795115985c2aafb3e3e0eaefdff291eb2ca132", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 20, "num_lines": 2, "path": "/week5/lower.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "fruit = 'Apple'\nprint(fruit.lower())" }, { "alpha_fraction": 0.4892241358757019, "alphanum_fraction": 0.5107758641242981, "avg_line_length": 16.884614944458008, "blob_id": "cfa8fccc35559420bc93c7df39a98ea6e7d95cd5", "content_id": "3855dcc16a10cb2736eecfb62983d50629402bcb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 464, "license_type": "no_license", "max_line_length": 49, "num_lines": 26, "path": "/week6/global1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#This program simulates 10 tosses of a cion.\nimport random\n\nHEADS = 1\nTAILS = 2\nTOSSES = 10\nh 
= 0\nt = 0\ndef main():\n global h\n global t\n for toss in range(TOSSES):\n #Simulate the coin toss.\n if random.randint(HEADS, TAILS) == HEADS:\n print(\"HEADS\")\n h = h + 1\n else:\n print(\"TAILS\")\n t = t + 1\n \n \n print(\"\\nHEADS = \",h)\n print(\"TAILS = \",t)\n\n#Call the main function.\nmain()" }, { "alpha_fraction": 0.6926229596138, "alphanum_fraction": 0.6967213153839111, "avg_line_length": 19.41666603088379, "blob_id": "790f2ef4b8a5bbcea2b986b4ab821e6b5da4c3a9", "content_id": "ba07b6f275dd46af7ae6c274216231d9631e3db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/week6/pass.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "value = int(input('number : '))\ndef main():\n show_double(value)\n\n#The show_double function accepts an argument\n#and displays double its value\ndef show_double(number):\n result = number * 2\n print(result)\n\n#call the main function.\nmain()" }, { "alpha_fraction": 0.7032520174980164, "alphanum_fraction": 0.7195122241973877, "avg_line_length": 19.58333396911621, "blob_id": "bd761942bca486e98ecec379b0d8a9e7095e3f95", "content_id": "af14c1275c14c1bcb1514a1c6c941c0b35b4501e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 246, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/week5/str_boolean1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "movie = \"2001: A SAMMY ODYSSEY\"\nbook = \"A Thousand Splendid Sharks\"\npoem = \"sammy lived in a pretty how town\"\n\nprint(movie.isalnum())\nprint(movie.isupper())\n\nprint(book.istitle())\nprint(book.isupper())\n\nprint(poem.istitle())\nprint(poem.islower())" }, { "alpha_fraction": 0.5201342105865479, "alphanum_fraction": 0.5335570573806763, "avg_line_length": 18.933332443237305, "blob_id": "35b749c0a6d05c17ed98c3b64064a9dacd3668ff", "content_id": "0e67c31bb5bbef351bb60bb167dafbe4132ef77b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 25, "num_lines": 15, "path": "/week2/oper.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "x = 10\ny = 12\n\n#Output: x>y is False\nprint('x > y is ' ,x>y)\n#Output: x<y is True\nprint('x < y is ' ,x<y)\n#Output: x==y is False\nprint('x == y is ' ,x==y)\n#output: x!=y is True\nprint('x != y is' ,x!=y)\n#Output: x>=y is False\nprint('x >= y is ' ,x>=y)\n#Output: x<=y is True\nprint('x <= y is ' ,x<=y)" }, { "alpha_fraction": 0.6642512083053589, "alphanum_fraction": 0.6690821051597595, "avg_line_length": 28.64285659790039, "blob_id": "90ffe9d831faf62732d237e480ea82916ac0b662", "content_id": "c1fb2d2e78b2fe8051e95c506f0f59c91ff898cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 63, "num_lines": 14, "path": "/week6/passargustr.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#This program demontrates passing two string\n#argumant to a function.\ndef reverse_name(first , last):\n print((''.join(reversed(last))),(''.join(reversed(first))))\n print(last[::-1],first[::-1])\n\ndef main():\n first_name = input('Enter your first name:')\n last_name = input('Enter your last name:')\n print('Your name reversed is')\n 
reverse_name(first_name,last_name)\n\n#Call the main function\nmain()" }, { "alpha_fraction": 0.4564315378665924, "alphanum_fraction": 0.5062240958213806, "avg_line_length": 18.91666603088379, "blob_id": "02beae4c8a66c08ea2f9a8a893679d2227be9db6", "content_id": "c4466afdad3f0adf2582ece80a1d5d6c30a57e73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 241, "license_type": "no_license", "max_line_length": 41, "num_lines": 12, "path": "/6306022610113/EX_2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#import datetime\n\n#mnum = input('What month of 2020: ')\n\n#print(\"=========================\")\n#print(\"S\")\n#print(\"=========================\")\nimport calendar\n\nmnum = int(input('What mount of 2020: '))\n\nprint('\\n',calendar.month(2020,mnum))\n\n " }, { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 34.5, "blob_id": "e2d52ea31cdbc0434e01eb5f3c1659e95119325a", "content_id": "833b46967d66a93a8ac7fdaad469d2f0e22eff9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/week5/basic_str_met6.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "balloon = \"Anirach has a balloon.\"\nprint(balloon.replace(\"has\",\"had\"))" }, { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6532257795333862, "avg_line_length": 16.714284896850586, "blob_id": "073c17c52a1cd1e48568eb45cf16bb9c58a6e5c6", "content_id": "eb6790815d75397f2466e40ea5aa517ea8ce0836", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 124, "license_type": "no_license", "max_line_length": 25, "num_lines": 7, "path": "/week5/str_boolean.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "number = \"5\"\nletters = \"abcdef!\"\nspace = \" \"\n\nprint(number.isnumeric())\nprint(letters.isalpha())\nprint(space.isspace())\n" }, { "alpha_fraction": 0.43071413040161133, "alphanum_fraction": 0.4754122793674469, "avg_line_length": 35.71780776977539, "blob_id": "5e922cf8927cba100813d97ca040915ac62adeb7", "content_id": "5d9801f162c02b95d82752a778e6e8ae9a25eaf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13401, "license_type": "no_license", "max_line_length": 125, "num_lines": 365, "path": "/PRGAME/test1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "import pgzrun\nimport pygame\nfrom random import randint, choice\nimport string\n\nWIDTH = 800\nHEIGHT = 600\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\nbgbutton = pygame.display.set_mode((WIDTH,HEIGHT))\n\nimage = pygame.image.load(r'PRGAME\\images\\img1.jpg')\nimage1 = pygame.image.load(r'PRGAME\\images\\img2.jpg')\nimage2 = pygame.image.load(r'PRGAME\\images\\img3.jpg')\nimage3 = pygame.image.load(r'PRGAME\\images\\img4.jpg')\nimage4 = pygame.image.load(r'PRGAME\\images\\img5.jpg')\n\nrun = True\n\nclass button():\n def __init__(self, color, x,y,width,height, text=''):\n self.color = color\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n self.text = text\n\n def draw(self,screen,outline=None):\n #Call this method to draw the button on the screen\n if outline:\n pygame.draw.rect(screen, outline, (self.x-2,self.y-2,self.width+4,self.height+4),0)\n \n pygame.draw.rect(screen, 
self.color, (self.x,self.y,self.width,self.height),0)\n if self.text != '':\n font = pygame.font.SysFont('comicsans', 48)\n text = font.render(self.text, 1, (255,255,255))\n screen.blit(text, (self.x + (self.width/2 - text.get_width()/2), self.y + (self.height/2 - text.get_height()/2)))\n\n def isOver(self, pos):\n #Pos is the mouse position or a tuple of (x,y) coordinates\n if pos[0] > self.x and pos[0] < self.x + self.width:\n if pos[1] > self.y and pos[1] < self.y + self.height:\n return True\n \n return False\n\nstartbutton = button((0,0,0),100,170,200,50, 'START')\nleaderbutton = button((0,0,0),100,250,200,50, 'BOARD')\nexitbutton = button((0,0,0),100,330,200,50, 'EXIT')\neasybutton = button((0,0,0),300,200,200,50, 'EASY')\nnormalbutton = button((0,0,0),300,250,200,50, 'NORMAL')\nhardbutton = button((0,0,0),300,300,200,50, 'HARD')\npuasebutton = button((0,0,0),600,530,120,50, 'PUASE')\nhomebutton = button((0,0,0),150,480,200,50, 'HOME')\nmenubutton = button((0,0,0),300,450,200,50, 'EXIT')\nresumebutton = button((0,0,0),300,200,200,50, 'RESUME')\nstartbutton1 = 0\nleaderbutton1 = 0\nexitbutton1 = 0\neasybutton1 = 0\npuasebutton1 = 0\nmenubutton1 = 0\nresumebutton1 = 0\nnormalbutton1 = 0\nhardbutton1 = 0\nhomebutton1 = 0 \npuasebutton1 = 0\n\nvocab = ['cat','dog','ant','fox','face','mask','drop','bag','bird','bad','good']\n\nvocab2 = ['dancer','bread','candy','bingo','chest','night','china','yellow','black','arrive']\n\nvocab3 = ['carefully','quickly','science','rectangle','headache','america','blanket','clothes'\n ,'airport','bathroom','birthday','morning']\n\n# LETTER = {\"letter\": \"\", \"x\": 0, \"y\": 0}\nON_SCREEN_LETTERS = []\nVELOCITY = 2\nSCORE = {\"RIGHT\": 0, \"WRONG\": 0}\nWORD_FONT_SIZE = 50\nMODE_EASY = 1\nMODE_NORMAL = 1\nWORD_LENGTH = 0\ngame_over = False\n\n\ndef draw(): # Pygame Zero draw function\n global SCORE ,ON_SCREEN_LETTERS\n screen.clear()\n bgbutton.blit(image,(0,0))\n startbutton.draw(bgbutton,(0,0,0))\n leaderbutton.draw(bgbutton,(0,0,0))\n exitbutton.draw(bgbutton,(0,0,0))\n \"\"\"\n SCORE[\"RIGHT\"] = 0\n SCORE[\"WRONG\"] = 0\n ON_SCREEN_LETTERS = []\n \"\"\"\n if startbutton1 == 1:\n screen.clear()\n bgbutton.blit(image4,(0,0))\n easybutton.draw(bgbutton,(0,0,0))\n normalbutton.draw(bgbutton,(0,0,0))\n hardbutton.draw(bgbutton,(0,0,0))\n homebutton.draw(bgbutton,(0,0,0))\n if easybutton1 == 1 or normalbutton1 == 1 or hardbutton1 == 1 :\n screen.clear()\n bgbutton.blit(image3,(0,0))\n puasebutton.draw(bgbutton,(0,0,0))\n for LETTER in ON_SCREEN_LETTERS:\n screen.draw.text(LETTER[\"letter\"], (LETTER[\"x\"], LETTER[\"y\"]), fontsize=WORD_FONT_SIZE, color=WHITE)\n screen.draw.text(\"RIGHT: \" + str(SCORE[\"RIGHT\"]), (WIDTH - 130, 10), fontsize=30, color=WHITE)\n screen.draw.text(\"WRONG: \" + str(SCORE[\"WRONG\"]), (WIDTH - 130, 40), fontsize=30, color=WHITE)\n\n if game_over:\n bgbutton.blit(image2,(0,0))\n message = 'Score : '+str(SCORE[\"RIGHT\"])\n screen.draw.text(message, topleft=(300,200),fontsize=50, color=WHITE)\n menubutton.draw(bgbutton,(0,0,0))\n \n if puasebutton1 == 1:\n bgbutton.fill((0,0,0))\n bgbutton.blit(image4,(0,0))\n resumebutton.draw(bgbutton,(0,0,0))\n menubutton.draw(bgbutton,(0,0,0))\n \n \n\n if exitbutton1 == 1:\n pygame.quit()\n quit() \n\n\ndef update():\n if startbutton1 == 1 and easybutton1 == 1: \n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if MODE_EASY == 1:\n while WORD_LENGTH < 1:\n # while len(WORD_LENGTH) < 
1:\n add_letter()\n\n if startbutton1 == 1 and normalbutton1 == 1: \n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if MODE_EASY == 1:\n while WORD_LENGTH < 1:\n # while len(WORD_LENGTH) < 1:\n add_letter()\n\n if startbutton1 == 1 and hardbutton1 == 1: \n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n LETTER[\"y\"] += VELOCITY\n if LETTER[\"y\"] == HEIGHT - 5:\n SCORE[\"WRONG\"] += 1\n delete_letter(i)\n if MODE_EASY == 1:\n while WORD_LENGTH < 1:\n # while len(WORD_LENGTH) < 1:\n add_letter()\n\n\ndef on_key_down(key, mod, unicode):\n print(unicode)\n if unicode:\n for i, LETTER in enumerate(ON_SCREEN_LETTERS):\n if LETTER[\"letter\"] == unicode:\n SCORE[\"RIGHT\"] += 1\n delete_letter(i)\n return\n else:\n SCORE[\"WRONG\"] += 1\n\n\ndef add_letter():\n #letter = choice(string.ascii_letters).lower()\n #x = randint(10, WIDTH - 20)\n #y = 1\n #ON_SCREEN_LETTERS.append({\"letter\": letter, \"x\": x, \"y\": y})\n global WORD_LENGTH,startbutton1,easybutton1,normalbutton1\n # letter = choice(string.ascii_letters).lower()\n if startbutton1 == 1 and easybutton1 == 1:\n letter = choice(vocab).lower()\n padding = WORD_FONT_SIZE / 2 \n x = randint(10, 600)\n for i, LETTER in enumerate(letter):\n y = i-1 \n ON_SCREEN_LETTERS.append({\"letter\": LETTER, \"x\": x + (i * padding), \"y\": y})\n WORD_LENGTH += 1\n\n if normalbutton1 == 1 and startbutton1 == 1 :\n letter = choice(vocab2).lower()\n padding = WORD_FONT_SIZE / 2 \n x = randint(10, 600)\n for i, LETTER in enumerate(letter):\n y = i-1 \n ON_SCREEN_LETTERS.append({\"letter\": LETTER, \"x\": x + (i * padding), \"y\": y})\n WORD_LENGTH += 1\n \n if hardbutton1 == 1 and startbutton1 == 1 :\n letter = choice(vocab3).lower()\n padding = WORD_FONT_SIZE / 2 \n x = randint(10, 600)\n for i, LETTER in enumerate(letter):\n y = i-1 \n ON_SCREEN_LETTERS.append({\"letter\": LETTER, \"x\": x + (i * padding), \"y\": y})\n WORD_LENGTH += 1\n \ndef delete_letter(i):\n global WORD_LENGTH\n del ON_SCREEN_LETTERS[i]\n if len(ON_SCREEN_LETTERS) < 1:\n add_letter()\n\ndef time_up():\n global game_over\n game_over = True\n\nclock.schedule(time_up, 180.0)\n\ndef on_mouse_down(pos):\n global startbutton1,leaderbutton1,exitbutton1,easybutton1,normalbutton1,hardbutton1,homebutton1,run,puasebutton1\n global SCORE ,ON_SCREEN_LETTERS\n while run:\n draw()\n pygame.display.update()\n # button = pygame.Rect(100,170,200,50)\n # button1 = pygame.rect(100,250,200,50)\n\n for event in pygame.event.get():\n pos = pygame.mouse.get_pos()\n # mouse = pygame.mouse.get_pos()\n # click = pygame.mouse.get_pressed()\n if event.type == pygame.MOUSEBUTTONDOWN:\n if puasebutton1 != 1 and startbutton1 < 1:\n if startbutton.isOver(pos):\n if startbutton1 < 1:\n startbutton1 += 1\n return startbutton1\n if exitbutton1 <= 1:\n if exitbutton.isOver(pos):\n exitbutton1 += 1\n SCORE[\"RIGHT\"] = 0\n SCORE[\"WRONG\"] = 0\n ON_SCREEN_LETTERS = []\n \n# __________________________________________\n if easybutton1 != 1 and normalbutton1 != 1 and hardbutton1 != 1: \n if easybutton1 <= 1 and startbutton1 == 1:\n if easybutton.isOver(pos):\n easybutton1 += 1\n return easybutton1\n # add_letter()\n if normalbutton1 <= 1 and startbutton1 == 1:\n if normalbutton.isOver(pos):\n normalbutton1 += 1\n return normalbutton1\n if hardbutton1 <= 1 and startbutton1 == 1:\n if hardbutton.isOver(pos):\n hardbutton1 += 1\n return hardbutton1\n# __________________________________________ \n if startbutton1 == 1:\n 
if puasebutton.isOver(pos):\n if puasebutton1 >= 0 and puasebutton1 < 1:\n puasebutton1 += 1\n if homebutton1 <= 1 :\n if homebutton.isOver(pos):\n if startbutton1 == 1:\n startbutton1 -= 1\n return startbutton1\n if puasebutton1 == 1:\n if resumebutton.isOver(pos):\n if startbutton1 == 1 and menubutton1 != 1:\n puasebutton1 -= 1\n return puasebutton1\n if menubutton1 < 1:\n if menubutton.isOver(pos):\n if startbutton1 == 1:\n startbutton1 -= 1\n return startbutton1\n if easybutton1 == 1:\n easybutton1 -= 1\n return easybutton1\n if normalbutton1 == 1:\n normalbutton1 -= 1\n return normalbutton1\n if hardbutton1 == 1:\n hardbutton1 -= 1\n return hardbutton1\n if puasebutton1 == 1:\n puasebutton1 -= 1\n return puasebutton1\n \n\n \n\n if event.type == pygame.QUIT:\n run = False\n pygame.quit()\n quit()\n\n #def mouse_touch(pos):\n # for event in pygame.event.get():\n if event.type == pygame.MOUSEMOTION:\n if startbutton.isOver(pos):\n startbutton.color = (0,128,0)\n else :\n startbutton.color = (0,0,0)\n # ____________________________________\n if leaderbutton.isOver(pos):\n leaderbutton.color = (128,128,0)\n else :\n leaderbutton.color = (0,0,0)\n # ____________________________________\n if exitbutton.isOver(pos):\n exitbutton.color = (128,0,0)\n else:\n exitbutton.color = (0,0,0)\n # ____________________________________\n if easybutton.isOver(pos):\n easybutton.color = (0,0,128)\n else:\n easybutton.color = (0,0,0)\n # ____________________________________\n if normalbutton.isOver(pos):\n normalbutton.color = (0,0,128)\n else:\n normalbutton.color = (0,0,0)\n # ____________________________________\n if hardbutton.isOver(pos):\n hardbutton.color = (0,0,128)\n else:\n hardbutton.color = (0,0,0)\n # ____________________________________\n if homebutton.isOver(pos):\n homebutton.color = (0,0,128)\n else:\n homebutton.color = (0,0,0)\n # ____________________________________\n if puasebutton.isOver(pos):\n puasebutton.color = (0,0,128)\n else:\n puasebutton.color = (0,0,0)\n # ____________________________________\n if menubutton.isOver(pos):\n menubutton.color = (0,0,128)\n else:\n menubutton.color = (0,0,0)\n # ____________________________________\n if resumebutton.isOver(pos):\n resumebutton.color = (0,0,128)\n else:\n resumebutton.color = (0,0,0)\n # ____________________________________\n\npgzrun.go()" }, { "alpha_fraction": 0.5477855205535889, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 23.520000457763672, "blob_id": "3b4eecd27dfc8cea6f652091659b842819a8d820", "content_id": "e86a01f7b1873ffe729fcf0a16ba5f76781f8bac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4290, "license_type": "no_license", "max_line_length": 72, "num_lines": 175, "path": "/test2.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "import pygame,sys\n\n\npygame.init()\n#############\n#pygame.mixer.music.load('Invincible.mp3')\n#pygame.mixer.music.play()\n\n#############\n\ndisplay_width = 800\ndisplay_height = 600\n\nblack = (0,0,0)\nwhite = (255,255,255)\n\nred = (200,0,0)\ngreen = (0,200,0)\n\nbright_red = (255,0,0)\nbright_green = (0,255,0)\n\nblock_color = (53,115,255)\n\n\ngameDisplay = pygame.display.set_mode((display_width,display_height))\npygame.display.set_caption('One Day After')\nclock = pygame.time.Clock()\n\ngameIcon = pygame.image.load('gameicon.jpg')\npygame.display.set_icon(gameIcon)\n\npause = False\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, 
textSurface.get_rect()\n\n\ndef GameOver():\n ####################################\n pygame.mixer.Sound.play(\"smb_gameover.wav\")\n pygame.mixer.music.stop()\n ####################################\n largeText = pygame.font.SysFont(\"comicsansms\",115)\n TextSurf, TextRect = text_objects(\"Game Over\", largeText)\n TextRect.center = ((display_width/2),(display_height/2))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n\n\n button(\"Play Again\",150,450,100,50,green,bright_green,game_loop)\n button(\"Quit\",550,450,100,50,red,bright_red,quitgame)\n\n pygame.display.update()\n clock.tick(15) \n\ndef button(msg,x,y,w,h,ic,ac,action=None):\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n\n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n pygame.draw.rect(gameDisplay, ac,(x,y,w,h))\n if click[0] == 1 and action != None:\n pygame.mixer.music.stop()\n action()\n\n else:\n pygame.draw.rect(gameDisplay, ic,(x,y,w,h))\n smallText = pygame.font.SysFont(\"comicsansms\",20)\n textSurf, textRect = text_objects(msg, smallText)\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n gameDisplay.blit(textSurf, textRect)\n\n\ndef quitgame():\n pygame.quit()\n sys.exit()\n quit()\n\ndef unpause():\n global pause\n pygame.mixer.music.unpause()\n pause = False\n\n\ndef paused():\n ############\n pygame.mixer.music.pause()\n #############\n largeText = pygame.font.SysFont(\"comicsansms\",115)\n TextSurf, TextRect = text_objects(\"Paused\", largeText)\n TextRect.center = ((display_width/2),(display_height/2))\n gameDisplay.blit(TextSurf, TextRect)\n\n\n while pause:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n\n button(\"Continue\",150,450,100,50,green,bright_green,unpause)\n button(\"Quit\",550,450,100,50,red,bright_red,quitgame)\n\n pygame.display.update()\n clock.tick(15) \n\n\ndef game_intro():\n\n intro = True\n\n while intro:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n\n pilt1 = pygame.image.load('apoc2.jpg').convert()\n gameDisplay.blit(pilt1, [0,0])\n pygame.display.flip()\n\n\n button(\"Start\",150,450,100,50,green,bright_green,game_loop)\n button(\"Quit\",550,450,100,50,red,bright_red,quitgame)\n\n pygame.display.update()\n\ndef game_loop():\n global pause\n\n gameExit = False\n\n while not gameExit:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n quit()\n gameDisplay.fill(white)\n gameDisplaypic = pygame.image.load('back.jpg').convert()\n gameDisplay.blit(gameDisplaypic, [0,0])\n tekst = \"This game will go as far as you choose!\"\n meie_font = pygame.font.SysFont(\"Arial\", 36)\n teksti_pilt = meie_font.render(tekst, False, (50,50,155))\n gameDisplay.blit(teksti_pilt, (100, 250))\n tekst2 = \"You are the smith of your destiny\"\n meie_font = pygame.font.SysFont(\"Arial\", 36)\n teksti_pilt = meie_font.render(tekst2, False, (50,50,155))\n gameDisplay.blit(teksti_pilt, (100, 400))\n button(\"Start playing\",300,500,150,50,green,bright_green)\n pygame.display.update()\n\n\n\n\n\n\n\n\n\ngame_intro()\ngame_loop()\npygame.quit()\nquit()" }, { "alpha_fraction": 0.42105263471603394, "alphanum_fraction": 0.42105263471603394, "avg_line_length": 36.66666793823242, "blob_id": "0dc64ea9738b62c3cb5a107555b858597d151861", "content_id": "f0af87f5b64cb0dcad0bd3f2c69320ae953548c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 114, "license_type": "no_license", "max_line_length": 39, "num_lines": 3, "path": "/week5/str_con.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "print( 'I ' + 'love ' + 'Python.' )\nprint( 'I' + ' love' + ' python.')\nprint ('I' + 'love' + 'python.') " }, { "alpha_fraction": 0.28169015049934387, "alphanum_fraction": 0.3239436745643616, "avg_line_length": 5, "blob_id": "49dbbfec92690ae10af5243b85d9544a58a6117a", "content_id": "b753b921d87edde7b1421c2dfcb6cfd60d634eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 30, "num_lines": 12, "path": "/week2/numm.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "c = 0\nx = 5\ny = 7\n\nc = x\n\nx = y\n\ny = c\n\n\nprint('x = %d y = %d '%(x,y))" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6388888955116272, "avg_line_length": 21.625, "blob_id": "d28dcd27fa0b2a8d19765fc6289f1184a03a1d4e", "content_id": "d322db3d0ebb3938766fd11bc58aec0e07bf60a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 31, "num_lines": 8, "path": "/week4/for_range.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "for x in range(5):\n print('Hello World')\nprint('Range start, end')\nfor num in range(1,5):\n print(num)\nprint('Range start, end, step')\nfor num in range(1,10,2):\n print(num)" }, { "alpha_fraction": 0.5966851115226746, "alphanum_fraction": 0.5966851115226746, "avg_line_length": 25, "blob_id": "8abc32d52c2ed95c9ff5b6218be2a06572e6ffcb", "content_id": "fb5e41b08fc903476c2b0577344ab972bc776ddc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/week4/exercise22.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "number = int(input(\"Enter number : \"))\ncolumn = int(input(\"Enter column munber : \"))\n\nfor i in range(column):\n for num in range(number):\n print(num,\" \",end=\"\")\n print()" }, { "alpha_fraction": 0.6742424368858337, "alphanum_fraction": 0.6742424368858337, "avg_line_length": 21.16666603088379, "blob_id": "93f2b9c1ca89045945f6a48e09b2f7a718c5fe7a", "content_id": "a3755bec602a5dbda30d96f58bd8f331f9034a63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 38, "num_lines": 6, "path": "/week6/function.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#This program demonstrates a function.\n#First, we de\ndef massage():\n print('I am Anirach,')\n print('I Love Python.')\nmassage()" }, { "alpha_fraction": 0.47887325286865234, "alphanum_fraction": 0.47887325286865234, "avg_line_length": 17, "blob_id": "c957681bb44f0adbf72d2547d2eb0ceab275a6c8", "content_id": "f2be59955089e482f7e28291ffbab0384c26896e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 71, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/week2/readmuti.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "a,b,c = input().split()\nprint('a = ',a)\nprint('b = ',b)\nprint('c = ',c)" }, { "alpha_fraction": 0.625, 
"alphanum_fraction": 0.6499999761581421, "avg_line_length": 19.16666603088379, "blob_id": "af3341c3192d2fb7352e1e3192c392165d1d7f87", "content_id": "195a37e06b58b92f82ccdf59d521a0e31e859589", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/week4/while.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "#Python program to illustrate\n# while loop\ncount = 0 \nwhile (count < 3):\n count = count + 1\n print(\"Hello Lalana\")" }, { "alpha_fraction": 0.6224783658981323, "alphanum_fraction": 0.6282420754432678, "avg_line_length": 37.66666793823242, "blob_id": "da91a0b01674fd79aee23c93acd3e452a3d334d4", "content_id": "44941827da6f65fdbf3159d07efc4fb8960e0f55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 50, "num_lines": 9, "path": "/week3/if_elif3.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "inchar = input(\"Input one character:\")\nif inchar >= 'A' and inchar <= 'Z':\n print(\"You input Upper case Letter \", inchar)\nelif inchar >= 'a' and inchar <= 'z' : \n print(\"You input Lower Case Letter\" ,inchar)\nelif inchar >= '0' and inchar <= '9':\n print(\"You input Number\" ,inchar)\nelse :\n print(\"It's not a letter or number.\" , inchar)" }, { "alpha_fraction": 0.49438202381134033, "alphanum_fraction": 0.49438202381134033, "avg_line_length": 21.5, "blob_id": "1a89712245cb656f86042fb422cd073c84966261", "content_id": "e890eda016609ec114564b2451dbb5f11b30820a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 41, "num_lines": 4, "path": "/week2/readmuti1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "a,b,c = [int(e) for e in input().split()]\nprint('a = ',a)\nprint('b = ',b)\nprint('c = ',c)" }, { "alpha_fraction": 0.5352941155433655, "alphanum_fraction": 0.6705882549285889, "avg_line_length": 14.545454978942871, "blob_id": "f401474814cbefaf3a0e82aa64a4c8053cc73635", "content_id": "78c2fe87a38782e5ca797c2b5f24663491dfb1ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 170, "license_type": "no_license", "max_line_length": 66, "num_lines": 11, "path": "/week10/pghello.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "import pgzrun\n\nWIDTH = 400\nHEIGHT = 300\n\ndef draw():\n screen.fill((100,200,150))\n screen.draw.text('Hello World', topleft=(100,100),fontsize=30)\n\npgzrun.go()\ndraw()" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.5, "avg_line_length": 6.666666507720947, "blob_id": "9253f0f1da2ccd154416dd3a6854ce6de0181020", "content_id": "4d8b7f27eb5485aa7c804cc54e3508d035696c53", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", "max_line_length": 9, "num_lines": 3, "path": "/week2/bitwise1.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "x = 10\ny = 4\nprint(~x)" }, { "alpha_fraction": 0.53125, "alphanum_fraction": 0.5416666865348816, "avg_line_length": 22.91666603088379, "blob_id": "7f9dcd74708461f04bfde2ea935d2db5bc0fafa3", "content_id": "53cf4299a4676e3cd0159731748543ec0b7a7b95", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 66, "num_lines": 12, "path": "/week4/sentinel.py", "repo_name": "6306022610113/INEPython", "src_encoding": "UTF-8", "text": "item = 'y'\n\n\nwhile item == 'y' or item == 'Y':\n wholesale = float(input(\"Enter the item's wholesale cost : \"))\n price = wholesale * 2.5\n\n print(\"Retail price :\" ,\\\n format (price,',.2f'))\n\n item = input(\"Do you have another item ?\" +\\\n \"(Enter Y or y for yes ) :\")\n\n" } ]
77
rssanders3/airflow-zip-operator-plugin
https://github.com/rssanders3/airflow-zip-operator-plugin
4c6aa0c3834e85b286396a4472bbdd24a801340f
355faf064c56f666cbeb9b0a6001b98a3a7bf27f
0c17d3008678aa5e64f7eeb2e4186dbd9cf08dd9
refs/heads/master
2023-08-07T07:49:30.684726
2017-03-27T21:45:15
2017-03-27T21:45:15
85,875,495
12
8
Apache-2.0
2017-03-22T20:58:24
2023-04-21T07:20:14
2023-07-26T13:00:16
Python
[ { "alpha_fraction": 0.6234545707702637, "alphanum_fraction": 0.6240000128746033, "avg_line_length": 40.66666793823242, "blob_id": "1d2446b30b9d47783c20039814747b4a1be4d8e0", "content_id": "1b4e1173a45bbdd5df27aba6a47caffbd7a8c42c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5500, "license_type": "permissive", "max_line_length": 176, "num_lines": 132, "path": "/zip_operator_plugin.py", "repo_name": "rssanders3/airflow-zip-operator-plugin", "src_encoding": "UTF-8", "text": "__author__ = 'rssanders3'\n\nfrom airflow.plugins_manager import AirflowPlugin\nfrom airflow.models import BaseOperator\nfrom airflow.utils import apply_defaults\nfrom zipfile import ZipFile\nimport os\nimport logging\n\n\"\"\"\nDocumentation References:\n - https://docs.python.org/2/library/zipfile.html\n - https://pymotw.com/2/zipfile/\n\"\"\"\n\nclass ZipOperator(BaseOperator):\n \"\"\"\n An operator which takes in a path to a file and zips the contents to a location you define.\n\n :param path_to_file_to_zip: Full path to the file you want to Zip\n :type path_to_file_to_zip: string\n :param path_to_save_zip: Full path to where you want to save the Zip file\n :type path_to_save_zip: string\n \"\"\"\n\n template_fields = ('path_to_file_to_zip', 'path_to_save_zip')\n template_ext = []\n ui_color = '#ffffff' # ZipOperator's Main Color: white # todo: find better color\n\n @apply_defaults\n def __init__(\n self,\n path_to_file_to_zip,\n path_to_save_zip,\n *args, **kwargs):\n super(ZipOperator, self).__init__(*args, **kwargs)\n self.path_to_file_to_zip = path_to_file_to_zip\n self.path_to_save_zip = path_to_save_zip\n\n def execute(self, context):\n logging.info("Executing ZipOperator.execute(context)\")\n\n logging.info(\"Path to the File to Zip provided by the User (path_to_file_to_zip): \" + str(self.path_to_file_to_zip))\n logging.info(\"Path to save the Zip File provided by the User (path_to_save_zip) : \" + str(self.path_to_save_zip))\n\n dir_path_to_file_to_zip = os.path.dirname(os.path.abspath(self.path_to_file_to_zip))\n logging.info(\"Absolute path to the File to Zip: \" + str(dir_path_to_file_to_zip))\n\n zip_file_name = os.path.basename(self.path_to_save_zip)\n logging.info(\"Zip File Name: \" + str(zip_file_name))\n\n file_to_zip_name = os.path.basename(self.path_to_file_to_zip)\n logging.info(\"Name of the File or Folder to be Zipped: \" + str(file_to_zip_name))\n\n os.chdir(dir_path_to_file_to_zip)\n logging.info(\"Current Working Directory: \" + str(os.getcwd()))\n\n with ZipFile(zip_file_name, 'w') as zip_file:\n logging.info(\"Created zip file object '\" + str(zip_file) + \"' with name '\" + str(zip_file_name) + \"'\")\n is_file = os.path.isfile(self.path_to_file_to_zip)\n logging.info(\"Is the File to Zip a File (else its a folder): \" + str(is_file))\n if is_file:\n logging.info(\"Writing '\" + str(file_to_zip_name) + \"' to zip file\")\n zip_file.write(file_to_zip_name)\n else: # is folder\n for dirname, subdirs, files in os.walk(file_to_zip_name):\n logging.info(\"Writing '\" + str(dirname) + \"' to zip file\")\n zip_file.write(dirname)\n for filename in files:\n file_name_to_write = os.path.join(dirname, filename)\n logging.info(\"Writing '\" + str(file_name_to_write) + \"' to zip file\")\n zip_file.write(file_name_to_write)\n\n # todo: print out contents and results of zip file creation (compression ratio, size, etc)\n\n logging.info(\"Closing Zip File Object\")\n zip_file.close()\n\n logging.info(\"Moving '\" + str(zip_file_name) + \"' to '\" + str(self.path_to_save_zip) + 
\"'\")\n os.rename(zip_file_name, self.path_to_save_zip)\n\n logging.info(\"Finished executing ZipOperator.execute(context)\")\n\n\nclass UnzipOperator(BaseOperator):\n \"\"\"\n An operator which takes in a path to a zip file and unzips the contents to a location you define.\n\n :param path_to_zip_file: Full path to the zip file you want to Unzip\n :type path_to_zip_file: string\n :param path_to_unzip_contents: Full path to where you want to save the contents of the Zip file you're Unzipping\n :type path_to_unzip_contents: string\n \"\"\"\n\n template_fields = ('path_to_zip_file', 'path_to_unzip_contents')\n template_ext = []\n ui_color = '#ffffff' # UnzipOperator's Main Color: white # todo: find better color\n\n @apply_defaults\n def __init__(\n self,\n path_to_zip_file,\n path_to_unzip_contents,\n *args, **kwargs):\n super(UnzipOperator, self).__init__(*args, **kwargs)\n self.path_to_zip_file = path_to_zip_file\n self.path_to_unzip_contents = path_to_unzip_contents\n\n def execute(self, context):\n logging.info(\"Executing UnzipOperator.execute(context)\")\n\n logging.info(\"path_to_zip_file: \" + str(self.path_to_zip_file))\n logging.info(\"path_to_unzip_contents: \" + str(self.path_to_unzip_contents))\n\n # No check is done if the zip file is valid so that the operator fails when expected so that airflow can properly mark the task as failed and schedule retries as needed\n with ZipFile(self.path_to_zip_file, 'r') as zip_file:\n logging.info(\"Created zip file object '\" + str(zip_file) + \"' from path '\" + str(self.path_to_zip_file) + \"'\")\n logging.info(\"Extracting all the contents to '\" + str(self.path_to_unzip_contents) + \"'\")\n zip_file.extractall(self.path_to_unzip_contents)\n logging.info(\"Closing Zip File Object\")\n zip_file.close()\n\n logging.info(\"Finished executing UnzipOperator.execute(context)\")\n\n\n# Defining the plugin class\nclass ZipOperatorPlugin(AirflowPlugin):\n name = \"zip_operator_plugin\"\n operators = [ZipOperator, UnzipOperator]\n flask_blueprints = []\n hooks = []\n executors = []\n admin_views = []\n menu_links = []\n" }, { "alpha_fraction": 0.6821561455726624, "alphanum_fraction": 0.6840148568153381, "avg_line_length": 30.647058486938477, "blob_id": "c08a71df13c5cd763e9414bdb13d6e34a31725f1", "content_id": "499beb8a038d3d01ef225112bca3517a350c8886", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "permissive", "max_line_length": 120, "num_lines": 34, "path": "/example_dags/zip_and_unzip_operator_example.py", "repo_name": "rssanders3/airflow-zip-operator-plugin", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nfrom airflow.operators import ZipOperator, UnzipOperator\nfrom airflow.models import DAG\nfrom datetime import datetime, timedelta\nimport os\n\nDAG_ID = os.path.basename(__file__).replace(\".pyc\", \"\").replace(\".py\", \"\")\n\nFILE_TO_ZIP_PATH = \"/tmp/input/test.txt\" # location to file or folder to zip\nZIP_FILE_PATH = \"/tmp/output/test.txt.zip\" # location save the zip operation to and read the unzip operator from\nUNZIP_PATH = \"/tmp/output/\" # location to unzip the contents of the file\n\ndefault_args = {\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'retries': 0,\n }\n\ndag = DAG(DAG_ID, default_args=default_args, schedule_interval=None, start_date=(datetime.now() - timedelta(minutes=1)))\n\nzip_task = ZipOperator(\n task_id='zip_task',\n path_to_file_to_zip=FILE_TO_ZIP_PATH,\n path_to_save_zip=ZIP_FILE_PATH,\n dag=dag)\n\nunzip_task = UnzipOperator(\n
task_id='unzip_task',\n path_to_zip_file=ZIP_FILE_PATH,\n path_to_unzip_contents=UNZIP_PATH,\n dag=dag)\n\n\nzip_task.set_downstream(unzip_task)\n" }, { "alpha_fraction": 0.6943883895874023, "alphanum_fraction": 0.6964069604873657, "avg_line_length": 27.802326202392578, "blob_id": "24ff4c5125a0b9b3e0da689091a704c147d5baba", "content_id": "c6d894d66441b1e12d7e729839f3a5295774da7a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2477, "license_type": "permissive", "max_line_length": 208, "num_lines": 86, "path": "/README.md", "repo_name": "rssanders3/airflow-zip-operator-plugin", "src_encoding": "UTF-8", "text": "# airflow-zip-operator-plugin\n\n## Description\n\nA plugin to Apache Airflow (Documentation: https://pythonhosted.org/airflow/, Source Code: https://github.com/apache/incubator-airflow) to allow you to run Zip and UnZip commands as an Operator from Workflows\n\n## TODO List\n\n* Print out metrics about zip file (compression, size, etc)\n* Test extensively\n\n## How do Deploy\n\n1. Copy the zip_operator_plugin.py file into the Airflow Plugins directory\n\n * The Airflow Plugins Directory is defined in the airflow.cfg file as the variable \"plugins_folder\"\n \n * The Airflow Plugins Directory is, by default, ${AIRFLOW_HOME}/plugins\n \n * You may have to create the Airflow Plugins Directory folder as it is not created by default\n \n * quick way of doing this:\n \n $ cd {AIRFLOW_PLUGINS_FOLDER}\n $ wget https://raw.githubusercontent.com/rssanders3/airflow-zip-operator-plugin/master/zip_operator_plugin.py\n \n2. Restart the Airflow Services\n\n3. Create or Deploy DAGs which utilize the Operator\n\n4. Your done!\n\n## ZipOperator\n\n### Operator Definition\n\nclass **airflow.operators.ZipOperator**(input_file_path, output_file_path, *args, **kwargs)\n\nBases: **airflow.operators.BaseOperator**\n\nAn operator which takes in a path to a file and zips the contents to a location you define. \n\nParameters:\n\n* **path_to_file_to_zip** (string) - Full path to the file you want to Zip\n* **path_to_save_zip** (string) - Full path to where you want to save the Zip file\n\n### Example\n\n ```\n from airflow.operators import ZipOperator\n \n zip_task = ZipOperator(\n task_id='zip_task',\n path_to_file_to_zip=\"/path/to/file\",\n path_to_save_zip=\"/path/to/file.zip\",\n dag=dag)\n ```\n\n## UnzipOperator\n\n\n### Operator Definition\n\nclass **airflow.operators.UnzipOperator**(input_file_path, output_file_path, *args, **kwargs)\n\nBases: **airflow.operators.BaseOperator**\n\nAn operator which takes in a path to a zip file and unzips the contents to a location you define. \n\nParameters:\n\n* **path_to_zip_file** (string) - Full path to the zip file you want to Unzip\n* **path_to_unzip_contents** (string) - Full path to where you want to save the contents of the Zip file you're Unzipping\n\n### Example\n\n ```\n from airflow.operators import UnzipOperator\n \n unzip_task = UnzipOperator(\n task_id='unzip_task',\n path_to_zip_file=\"/path/to/file.zip\",\n path_to_unzip_contents=\"/path/to/unzip/to/\",\n dag=dag)\n ```\n" } ]
3
MrX1997/JairoSaavedra_Ejercicio27
https://github.com/MrX1997/JairoSaavedra_Ejercicio27
54ce627a6b6d7ac8e3313d6062e478cc13a7d212
63b197d5bd1ff1f489562e7a60fbbefd64837f79
c5cec8b82159b9c55d32bc2f326f5df6277d5f3f
refs/heads/master
2020-04-06T12:54:15.186332
2018-11-14T12:58:05
2018-11-14T12:58:05
157,475,461
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6698564887046814, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 22.11111068725586, "blob_id": "d7c3da519c356aa2d08c40e19eb35fd7d4a35aa4", "content_id": "0c214ab4f7b72165082091ac058f6d24351fc8c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 209, "license_type": "no_license", "max_line_length": 65, "num_lines": 9, "path": "/job.sh", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "#PBS -l nodes=1:ppn=1,mem=2gb,walltime=00:10:00\n#PBS -M [email protected]\n#PBS -m abe\n#PBS -N ejercicio26\n\n\nmodule load anaconda/python3\ncd ja.saavdra # este es el directorio desde donde se ejecuto qsub\nmake -f mkf.mk\n\n" }, { "alpha_fraction": 0.5148515105247498, "alphanum_fraction": 0.6138613820075989, "avg_line_length": 15.666666984558105, "blob_id": "b4a1bef57b65d7318f344385292dc1d4501f6310", "content_id": "ee8614163cc5efea9720210d4c7d7e5ee53a135c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 101, "license_type": "no_license", "max_line_length": 23, "num_lines": 6, "path": "/mkf.mk", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "mpi1.x: MPI1.c\n\tmpicc MPI1.c -o mpi1.x\n\t./mpi1.x \nmpi2.x: MPI2.c\t\n\tmpicc MPI2.c -o mpi2.x\n\t./mpi2.x \n" }, { "alpha_fraction": 0.667870044708252, "alphanum_fraction": 0.7220216393470764, "avg_line_length": 19.774999618530273, "blob_id": "ccd88cc851f559dd727ed51de5651929a37cba59", "content_id": "f7c846a37e7da3e7f0b509bb9c2a1534a97bc3ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 831, "license_type": "no_license", "max_line_length": 60, "num_lines": 40, "path": "/plots.py", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "import matplotlib\nmatplotlib.use(\"Agg\")\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nn=500\nmu=0\nsigma=1\n\nfrom scipy.stats import norm\n\n\nd1=np.genfromtxt('sample_1.dat')\nd2=np.genfromtxt('sample_2.dat')\nd3=np.genfromtxt('sample_3.dat')\nd4=np.genfromtxt('sample_4.dat')\n\nx_axis = np.arange(-10, 10, 0.001)\nplt.subplot(221)\nplt.plot(x_axis, norm.pdf(x_axis,mu,sigma),label='sample 1')\nplt.hist(d1,normed=1)\nplt.legend()\n\nplt.subplot(222)\nplt.plot(x_axis, norm.pdf(x_axis,mu,sigma),label='sample 2')\nplt.hist(d2,normed=1)\nplt.legend()\n\nplt.subplot(223)\nplt.plot(x_axis, norm.pdf(x_axis,mu,sigma),label='sample 3')\nplt.hist(d3,normed=1)\nplt.legend()\n\nplt.subplot(224)\nplt.plot(x_axis, norm.pdf(x_axis,mu,sigma),label='sample 4')\nplt.hist(d4,normed=1)\nplt.legend()\n\nplt.title('Sample vs Real')\nplt.savefig('sample.pdf')\n" }, { "alpha_fraction": 0.5530558824539185, "alphanum_fraction": 0.5924259424209595, "avg_line_length": 17.914894104003906, "blob_id": "b12260f113d289456e709ee386ec99d7e2000e20", "content_id": "77121b4e55363bf74b74e4b5f53758389aa6a284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2667, "license_type": "no_license", "max_line_length": 66, "num_lines": 141, "path": "/sample.cpp", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <time.h>\n#include <stdlib.h>\n#include <math.h>\n#include <fstream>\n#define PI 3.141592653589793\n#include <iostream>\n#include <cstdlib>\n#include <cmath>\n#include <limits>\nusing namespace std;\nstatic const double 
epsilon = std::numeric_limits<double>::min();\nvoid i1(double *u,int n,double mu,double sigma);\nvoid i2(double *u,int n,double mu,double sigma);\nvoid i3(double *u,int n,double mu,double sigma);\nvoid i4(double *u,int n,double mu,double sigma);\ndouble _rn(void);\ndouble gg(double mu, double sigma);\ntime_t t;\n\n\nint main (int argc, char *argv[] )\n{\n\tif( argc != 4 ) \n\t{\n\t\tcout << \"La entrada debe ser de la forma N mu sigma\\n\", argv[0];\n\t\texit(0);\n }\n int N = atoi(argv[1]);\n\tint mu = atoi(argv[2]);\n\tint sigma = atoi(argv[3]);\n srand((unsigned) time(&t));\n\tdouble n1[N]={};\n\tdouble n2[N]={};\n\tdouble n3[N]={};\n\tdouble n4[N]={};\n\tofstream myfile (\"sample.dat\");\n\n\tint i;\n\tfor(i=0;i<N+1;i++)\n\t{\n\t\t\n\t\tn1[i]=gg(mu,sigma);\n\t\tn2[i]=gg(mu,sigma);\t\n\t\tn3[i]=gg(mu,sigma);\n\t\tn4[i]=gg(mu,sigma);\t\n }\n i1(n1,N,mu,sigma);\n i2(n2,N,mu,sigma);\n i3(n3,N,mu,sigma);\n i4(n4,N,mu,sigma);\n return 0;\n}\ndouble _rn(void)\n{\n return (double) rand()/(RAND_MAX*1.0);\n}\n\n\nvoid i1(double *u,int n,double mu,double sigma)\n{\n\tofstream myfile (\"sample_1.dat\");\n if (myfile.is_open())\n {\n\t\tint i;\n\t\tfor(i=0;i<n+1;i++)\n\t\t{\n\t\t\tmyfile << u[i] << '\\n';\n\t\t}\n myfile.close();\n }\n else cout << \"Unable to open file\";\n}\nvoid i2(double *u,int n,double mu,double sigma)\n{\n\tofstream myfile (\"sample_2.dat\");\n if (myfile.is_open())\n {\n\t\tint i;\n\t\tfor(i=0;i<n+1;i++)\n\t\t{\n\t\t\tmyfile << u[i] << '\\n';\n\t\t}\n myfile.close();\n }\n else cout << \"Unable to open file\";\n}\nvoid i3(double *u,int n,double mu,double sigma)\n{\n\tofstream myfile (\"sample_3.dat\");\n if (myfile.is_open())\n {\n\t\tint i;\n\t\tfor(i=0;i<n+1;i++)\n\t\t{\n\t\t\tmyfile << u[i] << '\\n';\n\t\t}\n myfile.close();\n }\n else cout << \"Unable to open file\";\n}\nvoid i4(double *u,int n,double mu,double sigma)\n{\n\tofstream myfile (\"sample_4.dat\");\n if (myfile.is_open())\n {\n\t\tint i;\n\t\tfor(i=0;i<n+1;i++)\n\t\t{\n\t\t\tmyfile << u[i] << '\\n';\n\t\t}\n myfile.close();\n }\n else cout << \"Unable to open file\";\n}\n\ndouble gg(double mu, double sigma)\n{\n\tstatic const double epsilon = std::numeric_limits<double>::min();\n\tstatic const double two_pi = 2.0*3.14159265358979323846;\n\n\tthread_local double z1;\n\tthread_local bool generate;\n\tgenerate = !generate;\n\n\tif (!generate)\n\t return z1 * sigma + mu;\n\n\tdouble u1, u2;\n\tdo\n\t {\n\t u1 = _rn();\n\t u2 = _rn();\n\t }\n\twhile ( u1 <= epsilon );\n\n\tdouble z0;\n\tz0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2);\n\tz1 = sqrt(-2.0 * log(u1)) * sin(two_pi * u2);\n\treturn z0 * sigma + mu;\n}\n" }, { "alpha_fraction": 0.6702702641487122, "alphanum_fraction": 0.7189189195632935, "avg_line_length": 19.55555534362793, "blob_id": "39971600a5de685d8733cdffe639b9bfa64aad63", "content_id": "83d77053650a539aeda1bc48d628af53a8355be6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 185, "license_type": "no_license", "max_line_length": 37, "num_lines": 9, "path": "/submit_job.sh", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "#PBS -N test_scheduler\n#PBS -l nodes=4:ppn=4\n#PBS -M [email protected]\n#PBS -m abe\n\nmodule load rocks-openmpi_ib\nmodule load anaconda/python3\ncd ja.saavedra\nmpiexec -v -np 4 ./sample.cpp 500 0 1\n" }, { "alpha_fraction": 0.6607142686843872, "alphanum_fraction": 0.7053571343421936, "avg_line_length": 17.5, "blob_id": "9f2405dcb774d53a9a50e25bf341529b1d251a4b", "content_id": 
"b2f74aa97c7816ed174527f7674469dc8531159d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 112, "license_type": "no_license", "max_line_length": 28, "num_lines": 6, "path": "/makefile", "repo_name": "MrX1997/JairoSaavedra_Ejercicio27", "src_encoding": "UTF-8", "text": "#EXECS=mpi_hello_world\n#MPICC?=mpicc\n\nsample: mpi_hello_world.c\n\tmpic++ -o sample sample.cpp\n\t./sample 500 0 1\n\n" } ]
6
useakat/cfqcd
https://github.com/useakat/cfqcd
de64059011dd2aa3d10082e93802f32839704cef
716edaa077f97175ede5ceb87b6b9b35ac379c81
48fad9ef3e35f008a043b25631b48704fd9ba1ba
refs/heads/master
2021-01-10T02:06:04.142208
2016-02-07T15:01:32
2016-02-07T15:01:32
51,251,541
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3109077215194702, "alphanum_fraction": 0.35226261615753174, "avg_line_length": 25.740917205810547, "blob_id": "41b453b43ece061aeeadf97b4afc37b64c818efa", "content_id": "14162b157ed9e968e2b6719bda0d885664f421c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44904, "license_type": "no_license", "max_line_length": 72, "num_lines": 1679, "path": "/particles.py", "repo_name": "useakat/cfqcd", "src_encoding": "UTF-8", "text": "# This file was automatically created by FeynRules $Revision: 302 $\n# Mathematica version: 7.0 for Mac OS X x86 (64-bit) (November 11, 2008)\n# Date: Tue 31 Aug 2010 16:54:46\n\n\nfrom __future__ import division\nfrom object_library import all_particles, Particle\n\nve = Particle(pdg_code = 12,\n name = 've',\n antiname = 've~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 've',\n antitexname = 've',\n line = 'straight',\n charge = 0,\n LeptonNumber = 1,\n GhostNumber = 0)\n\nve__tilde__ = ve.anti()\n\nvm = Particle(pdg_code = 14,\n name = 'vm',\n antiname = 'vm~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'vm',\n antitexname = 'vm',\n line = 'straight',\n charge = 0,\n LeptonNumber = 1,\n GhostNumber = 0)\n\nvm__tilde__ = vm.anti()\n\nvt = Particle(pdg_code = 16,\n name = 'vt',\n antiname = 'vt~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'vt',\n antitexname = 'vt',\n line = 'straight',\n charge = 0,\n LeptonNumber = 1,\n GhostNumber = 0)\n\nvt__tilde__ = vt.anti()\n\ne__minus__ = Particle(pdg_code = 11,\n name = 'e-',\n antiname = 'e+',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'e-',\n antitexname = 'e-',\n line = 'straight',\n charge = -1,\n LeptonNumber = 1,\n GhostNumber = 0)\n\ne__plus__ = e__minus__.anti()\n\nm__minus__ = Particle(pdg_code = 13,\n name = 'm-',\n antiname = 'm+',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'm-',\n antitexname = 'm-',\n line = 'straight',\n charge = -1,\n LeptonNumber = 1,\n GhostNumber = 0)\n\nm__plus__ = m__minus__.anti()\n\ntt__minus__ = Particle(pdg_code = 15,\n name = 'tt-',\n antiname = 'tt+',\n spin = 2,\n color = 1,\n mass = 'MTA',\n width = 'ZERO',\n texname = 'tt-',\n antitexname = 'tt-',\n line = 'straight',\n charge = -1,\n LeptonNumber = 1,\n GhostNumber = 0)\n\ntt__plus__ = tt__minus__.anti()\n\nu = Particle(pdg_code = 2,\n name = 'u',\n antiname = 'u~',\n spin = 2,\n color = 3,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u',\n antitexname = 'u',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu__tilde__ = u.anti()\n\nc = Particle(pdg_code = 4,\n name = 'c',\n antiname = 'c~',\n spin = 2,\n color = 3,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'c',\n antitexname = 'c',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nc__tilde__ = c.anti()\n\nt = Particle(pdg_code = 6,\n name = 't',\n antiname = 't~',\n spin = 2,\n color = 3,\n mass = 'MT',\n width = 'WT',\n texname = 't',\n antitexname = 't',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nt__tilde__ = t.anti()\n\nd = Particle(pdg_code = 1,\n name = 'd',\n antiname = 'd~',\n spin = 2,\n color = 3,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'd',\n antitexname = 'd',\n line = 'straight',\n charge = -1/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nd__tilde__ = d.anti()\n\ns = Particle(pdg_code = 3,\n name = 's',\n antiname = 's~',\n spin = 2,\n 
color = 3,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 's',\n antitexname = 's',\n line = 'straight',\n charge = -1/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ns__tilde__ = s.anti()\n\nb = Particle(pdg_code = 5,\n name = 'b',\n antiname = 'b~',\n spin = 2,\n color = 3,\n mass = 'MB',\n width = 'ZERO',\n texname = 'b',\n antitexname = 'b',\n line = 'straight',\n charge = -1/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nb__tilde__ = b.anti()\n\nghA = Particle(pdg_code = 9000001,\n name = 'ghA',\n antiname = 'ghA~',\n spin = -1,\n color = 1,\n mass = 'ZERO',\n width = 'WghA',\n texname = 'ghA',\n antitexname = 'ghA',\n line = 'dotted',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 1)\n\nghA__tilde__ = ghA.anti()\n\nghZ = Particle(pdg_code = 9000002,\n name = 'ghZ',\n antiname = 'ghZ~',\n spin = -1,\n color = 1,\n mass = 'MZ',\n width = 'WghZ',\n texname = 'ghZ',\n antitexname = 'ghZ',\n line = 'dotted',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 1)\n\nghZ__tilde__ = ghZ.anti()\n\nghWp = Particle(pdg_code = 9000003,\n name = 'ghWp',\n antiname = 'ghWp~',\n spin = -1,\n color = 1,\n mass = 'MW',\n width = 'WghWp',\n texname = 'ghWp',\n antitexname = 'ghWp',\n line = 'dotted',\n charge = 1,\n LeptonNumber = 0,\n GhostNumber = 1)\n\nghWp__tilde__ = ghWp.anti()\n\nghWm = Particle(pdg_code = 9000004,\n name = 'ghWm',\n antiname = 'ghWm~',\n spin = -1,\n color = 1,\n mass = 'MW',\n width = 'WghWm',\n texname = 'ghWm',\n antitexname = 'ghWm',\n line = 'dotted',\n charge = -1,\n LeptonNumber = 0,\n GhostNumber = 1)\n\nghWm__tilde__ = ghWm.anti()\n\nghG = Particle(pdg_code = 9000005,\n name = 'ghG',\n antiname = 'ghG~',\n spin = -1,\n color = 8,\n mass = 'ZERO',\n width = 'WghG',\n texname = 'ghG',\n antitexname = 'ghG',\n line = 'dotted',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 1)\n\nghG__tilde__ = ghG.anti()\n\nA = Particle(pdg_code = 22,\n name = 'A',\n antiname = 'A',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'A',\n antitexname = 'A',\n line = 'wavy',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nZ = Particle(pdg_code = 23,\n name = 'Z',\n antiname = 'Z',\n spin = 3,\n color = 1,\n mass = 'MZ',\n width = 'WZ',\n texname = 'Z',\n antitexname = 'Z',\n line = 'wavy',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nW__plus__ = Particle(pdg_code = 24,\n name = 'W+',\n antiname = 'W-',\n spin = 3,\n color = 1,\n mass = 'MW',\n width = 'WW',\n texname = 'W+',\n antitexname = 'W+',\n line = 'wavy',\n charge = 1,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nW__minus__ = W__plus__.anti()\n\nG = Particle(pdg_code = 21,\n name = 'G',\n antiname = 'G',\n spin = 3,\n color = 8,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'G',\n antitexname = 'G',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nH = Particle(pdg_code = 25,\n name = 'H',\n antiname = 'H',\n spin = 1,\n color = 1,\n mass = 'MH',\n width = 'WH',\n texname = '\\\\phi',\n antitexname = '\\\\phi',\n line = 'dashed',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nphi0 = Particle(pdg_code = 250,\n name = 'phi0',\n antiname = 'phi0',\n spin = 1,\n color = 1,\n mass = 'MZ',\n width = 'Wphi',\n texname = 'phi0',\n antitexname = 'phi0',\n line = 'dashed',\n GoldstoneBoson = True,\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nphi__plus__ = Particle(pdg_code = 251,\n name = 'phi+',\n antiname = 'phi-',\n spin = 1,\n color = 1,\n mass = 'MW',\n width = 'Wphi2',\n texname = '\\\\phi^+',\n antitexname = '\\\\phi^+',\n line = 'dashed',\n 
GoldstoneBoson = True,\n charge = 1,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nphi__minus__ = phi__plus__.anti()\n\n#For gg>8g, uu~>8g and uu>uu6g\n\ng13 = Particle(pdg_code = 9013,\n name = 'g13',\n antiname = 'g13',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g13',\n antitexname = 'g13',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng14 = Particle(pdg_code = 9014,\n name = 'g14',\n antiname = 'g14',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g14',\n antitexname = 'g14',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng15 = Particle(pdg_code = 9015,\n name = 'g15',\n antiname = 'g15',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g15',\n antitexname = 'g15',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng16 = Particle(pdg_code = 9016,\n name = 'g16',\n antiname = 'g16',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g16',\n antitexname = 'g16',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng17 = Particle(pdg_code = 9017,\n name = 'g17',\n antiname = 'g17',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g17',\n antitexname = 'g17',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng18 = Particle(pdg_code = 9018,\n name = 'g18',\n antiname = 'g18',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g18',\n antitexname = 'g18',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng19 = Particle(pdg_code = 9019,\n name = 'g19',\n antiname = 'g19',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g19',\n antitexname = 'g19',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng1a = Particle(pdg_code = 90110,\n name = 'g1a',\n antiname = 'g1a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g1a',\n antitexname = 'g1a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng21 = Particle(pdg_code = 9021,\n name = 'g21',\n antiname = 'g21',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g21',\n antitexname = 'g21',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng24 = Particle(pdg_code = 9024,\n name = 'g24',\n antiname = 'g24',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g24',\n antitexname = 'g24',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng25 = Particle(pdg_code = 9025,\n name = 'g25',\n antiname = 'g25',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g25',\n antitexname = 'g25',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng26 = Particle(pdg_code = 9026,\n name = 'g26',\n antiname = 'g26',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g26',\n antitexname = 'g26',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng27 = Particle(pdg_code = 9027,\n name = 'g27',\n antiname = 'g27',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g27',\n antitexname = 'g27',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng28 = Particle(pdg_code = 9028,\n name = 'g28',\n antiname = 'g28',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g28',\n antitexname = 'g28',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n 
GhostNumber = 0)\n\ng29 = Particle(pdg_code = 9029,\n name = 'g29',\n antiname = 'g29',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g29',\n antitexname = 'g29',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng2a = Particle(pdg_code = 90210,\n name = 'g2a',\n antiname = 'g2a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g2a',\n antitexname = 'g2a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng31 = Particle(pdg_code = 9031,\n name = 'g31',\n antiname = 'g31',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g31',\n antitexname = 'g31',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng32 = Particle(pdg_code = 9032,\n name = 'g32',\n antiname = 'g32',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g32',\n antitexname = 'g32',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng35 = Particle(pdg_code = 9035,\n name = 'g35',\n antiname = 'g35',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g35',\n antitexname = 'g35',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng36 = Particle(pdg_code = 9036,\n name = 'g36',\n antiname = 'g36',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g36',\n antitexname = 'g36',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng37 = Particle(pdg_code = 9037,\n name = 'g37',\n antiname = 'g37',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g37',\n antitexname = 'g37',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng38 = Particle(pdg_code = 9038,\n name = 'g38',\n antiname = 'g38',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g38',\n antitexname = 'g38',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng39 = Particle(pdg_code = 9039,\n name = 'g39',\n antiname = 'g39',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g39',\n antitexname = 'g39',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng3a = Particle(pdg_code = 90310,\n name = 'g3a',\n antiname = 'g3a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g3a',\n antitexname = 'g3a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng41 = Particle(pdg_code = 9041,\n name = 'g41',\n antiname = 'g41',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g41',\n antitexname = 'g41',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng42 = Particle(pdg_code = 9042,\n name = 'g42',\n antiname = 'g42',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g42',\n antitexname = 'g42',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng43 = Particle(pdg_code = 9043,\n name = 'g43',\n antiname = 'g43',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g43',\n antitexname = 'g43',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng46 = Particle(pdg_code = 9046,\n name = 'g46',\n antiname = 'g46',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g46',\n antitexname = 'g46',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng47 = Particle(pdg_code = 9047,\n name = 'g47',\n antiname = 'g47',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n 
width = 'ZERO',\n texname = 'g47',\n antitexname = 'g47',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng48 = Particle(pdg_code = 9048,\n name = 'g48',\n antiname = 'g48',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g48',\n antitexname = 'g48',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng49 = Particle(pdg_code = 9049,\n name = 'g49',\n antiname = 'g49',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g49',\n antitexname = 'g49',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng4a = Particle(pdg_code = 90410,\n name = 'g4a',\n antiname = 'g4a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g4a',\n antitexname = 'g4a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng51 = Particle(pdg_code = 9051,\n name = 'g51',\n antiname = 'g51',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g51',\n antitexname = 'g51',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng52 = Particle(pdg_code = 9052,\n name = 'g52',\n antiname = 'g52',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g52',\n antitexname = 'g52',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng53 = Particle(pdg_code = 9053,\n name = 'g53',\n antiname = 'g53',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g53',\n antitexname = 'g53',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng54 = Particle(pdg_code = 9054,\n name = 'g54',\n antiname = 'g54',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g54',\n antitexname = 'g54',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng57 = Particle(pdg_code = 9057,\n name = 'g57',\n antiname = 'g57',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g57',\n antitexname = 'g57',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng58 = Particle(pdg_code = 9058,\n name = 'g58',\n antiname = 'g58',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g58',\n antitexname = 'g58',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng59 = Particle(pdg_code = 9059,\n name = 'g59',\n antiname = 'g59',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g59',\n antitexname = 'g59',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng5a = Particle(pdg_code = 90510,\n name = 'g5a',\n antiname = 'g5a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g5a',\n antitexname = 'g5a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng61 = Particle(pdg_code = 9061,\n name = 'g61',\n antiname = 'g61',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g61',\n antitexname = 'g61',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng62 = Particle(pdg_code = 9062,\n name = 'g62',\n antiname = 'g62',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g62',\n antitexname = 'g62',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng63 = Particle(pdg_code = 9063,\n name = 'g63',\n antiname = 'g63',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g63',\n antitexname = 'g63',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng64 
= Particle(pdg_code = 9064,\n name = 'g64',\n antiname = 'g64',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g64',\n antitexname = 'g64',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng65 = Particle(pdg_code = 9065,\n name = 'g65',\n antiname = 'g65',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g65',\n antitexname = 'g65',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng68 = Particle(pdg_code = 9068,\n name = 'g68',\n antiname = 'g68',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g68',\n antitexname = 'g68',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng69 = Particle(pdg_code = 9069,\n name = 'g69',\n antiname = 'g69',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g69',\n antitexname = 'g69',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng6a = Particle(pdg_code = 90610,\n name = 'g6a',\n antiname = 'g6a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g6a',\n antitexname = 'g6a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng71 = Particle(pdg_code = 9071,\n name = 'g71',\n antiname = 'g71',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g71',\n antitexname = 'g71',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng72 = Particle(pdg_code = 9072,\n name = 'g72',\n antiname = 'g72',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g72',\n antitexname = 'g72',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng73 = Particle(pdg_code = 9073,\n name = 'g73',\n antiname = 'g73',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g73',\n antitexname = 'g73',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng74 = Particle(pdg_code = 9074,\n name = 'g74',\n antiname = 'g74',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g74',\n antitexname = 'g74',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng75 = Particle(pdg_code = 9075,\n name = 'g75',\n antiname = 'g75',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g75',\n antitexname = 'g75',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng76 = Particle(pdg_code = 9076,\n name = 'g76',\n antiname = 'g76',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g76',\n antitexname = 'g76',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng79 = Particle(pdg_code = 9079,\n name = 'g79',\n antiname = 'g79',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g79',\n antitexname = 'g79',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng7a = Particle(pdg_code = 90710,\n name = 'g7a',\n antiname = 'g7a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g7a',\n antitexname = 'g7a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng81 = Particle(pdg_code = 9081,\n name = 'g81',\n antiname = 'g81',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g81',\n antitexname = 'g81',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng82 = Particle(pdg_code = 9082,\n name = 'g82',\n antiname = 'g82',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 
'g82',\n antitexname = 'g82',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng83 = Particle(pdg_code = 9083,\n name = 'g83',\n antiname = 'g83',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g83',\n antitexname = 'g83',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng84 = Particle(pdg_code = 9084,\n name = 'g84',\n antiname = 'g84',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g84',\n antitexname = 'g84',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng85 = Particle(pdg_code = 9085,\n name = 'g85',\n antiname = 'g85',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g85',\n antitexname = 'g85',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng86 = Particle(pdg_code = 9086,\n name = 'g86',\n antiname = 'g86',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g86',\n antitexname = 'g86',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng87 = Particle(pdg_code = 9087,\n name = 'g87',\n antiname = 'g87',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g87',\n antitexname = 'g87',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng8a = Particle(pdg_code = 90810,\n name = 'g8a',\n antiname = 'g8a',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g8a',\n antitexname = 'g8a',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng91 = Particle(pdg_code = 9091,\n name = 'g91',\n antiname = 'g91',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g91',\n antitexname = 'g91',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng92 = Particle(pdg_code = 9092,\n name = 'g92',\n antiname = 'g92',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g92',\n antitexname = 'g92',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng93 = Particle(pdg_code = 9093,\n name = 'g93',\n antiname = 'g93',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g93',\n antitexname = 'g93',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng94 = Particle(pdg_code = 9094,\n name = 'g94',\n antiname = 'g94',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g94',\n antitexname = 'g94',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng95 = Particle(pdg_code = 9095,\n name = 'g95',\n antiname = 'g95',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g95',\n antitexname = 'g95',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng96 = Particle(pdg_code = 9096,\n name = 'g96',\n antiname = 'g96',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g96',\n antitexname = 'g96',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng97 = Particle(pdg_code = 9097,\n name = 'g97',\n antiname = 'g97',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g97',\n antitexname = 'g97',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng98 = Particle(pdg_code = 9098,\n name = 'g98',\n antiname = 'g98',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g98',\n antitexname = 'g98',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga2 = Particle(pdg_code = 9102,\n 
name = 'ga2',\n antiname = 'ga2',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga2',\n antitexname = 'ga2',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga3 = Particle(pdg_code = 9103,\n name = 'ga3',\n antiname = 'ga3',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga3',\n antitexname = 'ga3',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga4 = Particle(pdg_code = 9104,\n name = 'ga4',\n antiname = 'ga4',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga4',\n antitexname = 'ga4',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga5 = Particle(pdg_code = 9105,\n name = 'ga5',\n antiname = 'ga5',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga5',\n antitexname = 'ga5',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga6 = Particle(pdg_code = 9106,\n name = 'ga6',\n antiname = 'ga6',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga6',\n antitexname = 'ga6',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga7 = Particle(pdg_code = 9107,\n name = 'ga7',\n antiname = 'ga7',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga7',\n antitexname = 'ga7',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga8 = Particle(pdg_code = 9108,\n name = 'ga8',\n antiname = 'ga8',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga8',\n antitexname = 'ga8',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nga9 = Particle(pdg_code = 9109,\n name = 'ga9',\n antiname = 'ga9',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ga9',\n antitexname = 'ga9',\n line = 'curly',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\ng0 = Particle(pdg_code = 9000,\n name = 'g0',\n antiname = 'g0',\n spin = 3,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'g0',\n antitexname = 'g0',\n line = 'wavy',\n charge = 0,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu1 = Particle(pdg_code = 9001,\n name = 'u1',\n antiname = 'u1~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u1',\n antitexname = 'u1',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu1__tilde__ = u1.anti()\n\nu2 = Particle(pdg_code = 9002,\n name = 'u2',\n antiname = 'u2~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u2',\n antitexname = 'u2',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu2__tilde__ = u2.anti()\n\nu3 = Particle(pdg_code = 9003,\n name = 'u3',\n antiname = 'u3~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u3',\n antitexname = 'u3',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu3__tilde__ = u3.anti()\n\nu4 = Particle(pdg_code = 9004,\n name = 'u4',\n antiname = 'u4~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u4',\n antitexname = 'u4',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu4__tilde__ = u4.anti()\n\nu5 = Particle(pdg_code = 9005,\n name = 'u5',\n antiname = 'u5~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u5',\n antitexname = 'u5',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu5__tilde__ = u5.anti()\n\nu6 = Particle(pdg_code = 9006,\n name 
= 'u6',\n antiname = 'u6~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u6',\n antitexname = 'u6',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu6__tilde__ = u6.anti()\n\nu7 = Particle(pdg_code = 9007,\n name = 'u7',\n antiname = 'u7~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u7',\n antitexname = 'u7',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu7__tilde__ = u7.anti()\n\nu8 = Particle(pdg_code = 9008,\n name = 'u8',\n antiname = 'u8~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u8',\n antitexname = 'u8',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu8__tilde__ = u8.anti()\n\nu9 = Particle(pdg_code = 9009,\n name = 'u9',\n antiname = 'u9~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'u9',\n antitexname = 'u9',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nu9__tilde__ = u9.anti()\n\nua = Particle(pdg_code = 90010,\n name = 'ua',\n antiname = 'ua~',\n spin = 2,\n color = 1,\n mass = 'ZERO',\n width = 'ZERO',\n texname = 'ua',\n antitexname = 'ua',\n line = 'straight',\n charge = 2/3,\n LeptonNumber = 0,\n GhostNumber = 0)\n\nua__tilde__ = ua.anti()\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.30645108222961426, "alphanum_fraction": 0.3879479765892029, "avg_line_length": 32.01485824584961, "blob_id": "e4fbfb5e0244d95d47ae28fa2f1db99cd742ae86", "content_id": "cb5f4e8ed7430e3f338fc072c190ab0f53c584ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 59990, "license_type": "no_license", "max_line_length": 117, "num_lines": 1817, "path": "/vertices.py", "repo_name": "useakat/cfqcd", "src_encoding": "UTF-8", "text": "# This file was automatically created by FeynRules $Revision: 302 $\n# Mathematica version: 7.0 for Mac OS X x86 (64-bit) (November 11, 2008)\n# Date: Tue 31 Aug 2010 16:54:46\n\n\nfrom object_library import all_vertices, Vertex\nimport particles as P\nimport couplings as C\nimport lorentz as L\n\n\nV_1 = Vertex(name = 'V_1',\n particles = [ P.H, P.H, P.H ],\n color = [ '1' ],\n lorentz = [ L.SSS1 ],\n couplings = {(0,0):C.GC_21})\n\nV_2 = Vertex(name = 'V_2',\n particles = [ P.G, P.G, P.G ],\n color = [ 'f(1,2,3)' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_3 = Vertex(name = 'V_3',\n particles = [ P.G, P.G, P.G, P.G ],\n color = [ 'f(2,3,\\'a1\\')*f(\\'a1\\',1,4)', 'f(2,4,\\'a1\\')*f(\\'a1\\',1,3)', 'f(3,4,\\'a1\\')*f(\\'a1\\',1,2)' ],\n lorentz = [ L.VVVV1, L.VVVV3, L.VVVV4 ],\n couplings = {(1,1):C.GC_6,(2,0):C.GC_6,(0,2):C.GC_6})\n\nV_4 = Vertex(name = 'V_4',\n particles = [ P.A, P.W__minus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_16})\n\nV_5 = Vertex(name = 'V_5',\n particles = [ P.W__minus__, P.W__plus__, P.H, P.H ],\n color = [ '1' ],\n lorentz = [ L.VVSS1 ],\n couplings = {(0,0):C.GC_10})\n\nV_6 = Vertex(name = 'V_6',\n particles = [ P.W__minus__, P.W__plus__, P.H ],\n color = [ '1' ],\n lorentz = [ L.VVS1 ],\n couplings = {(0,0):C.GC_22})\n\nV_7 = Vertex(name = 'V_7',\n particles = [ P.A, P.A, P.W__minus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.VVVV2 ],\n couplings = {(0,0):C.GC_18})\n\nV_8 = Vertex(name = 'V_8',\n particles = [ P.W__minus__, P.W__plus__, P.Z ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_7})\n\nV_9 = Vertex(name = 'V_9',\n particles = [ P.W__minus__, 
P.W__minus__, P.W__plus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.VVVV2 ],\n couplings = {(0,0):C.GC_8})\n\nV_10 = Vertex(name = 'V_10',\n particles = [ P.A, P.W__minus__, P.W__plus__, P.Z ],\n color = [ '1' ],\n lorentz = [ L.VVVV5 ],\n couplings = {(0,0):C.GC_17})\n\nV_11 = Vertex(name = 'V_11',\n particles = [ P.Z, P.Z, P.H, P.H ],\n color = [ '1' ],\n lorentz = [ L.VVSS1 ],\n couplings = {(0,0):C.GC_20})\n\nV_12 = Vertex(name = 'V_12',\n particles = [ P.Z, P.Z, P.H ],\n color = [ '1' ],\n lorentz = [ L.VVS1 ],\n couplings = {(0,0):C.GC_23})\n\nV_13 = Vertex(name = 'V_13',\n particles = [ P.W__minus__, P.W__plus__, P.Z, P.Z ],\n color = [ '1' ],\n lorentz = [ L.VVVV2 ],\n couplings = {(0,0):C.GC_9})\n\nV_14 = Vertex(name = 'V_14',\n particles = [ P.d__tilde__, P.d, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_1})\n\nV_15 = Vertex(name = 'V_15',\n particles = [ P.s__tilde__, P.s, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_1})\n\nV_16 = Vertex(name = 'V_16',\n particles = [ P.b__tilde__, P.b, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_1})\n\nV_17 = Vertex(name = 'V_17',\n particles = [ P.e__plus__, P.e__minus__, P.A ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_3})\n\nV_18 = Vertex(name = 'V_18',\n particles = [ P.m__plus__, P.m__minus__, P.A ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_3})\n\nV_19 = Vertex(name = 'V_19',\n particles = [ P.tt__plus__, P.tt__minus__, P.A ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_3})\n\nV_20 = Vertex(name = 'V_20',\n particles = [ P.u__tilde__, P.u, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_2})\n\nV_21 = Vertex(name = 'V_21',\n particles = [ P.c__tilde__, P.c, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_2})\n\nV_22 = Vertex(name = 'V_22',\n particles = [ P.t__tilde__, P.t, P.A ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_2})\n\nV_23 = Vertex(name = 'V_23',\n particles = [ P.d__tilde__, P.d, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_24 = Vertex(name = 'V_24',\n particles = [ P.s__tilde__, P.s, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_25 = Vertex(name = 'V_25',\n particles = [ P.b__tilde__, P.b, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_26 = Vertex(name = 'V_26',\n particles = [ P.b__tilde__, P.b, P.H ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFS1 ],\n couplings = {(0,0):C.GC_24})\n\nV_27 = Vertex(name = 'V_27',\n particles = [ P.d__tilde__, P.d, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, L.FFV3 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_14})\n\nV_28 = Vertex(name = 'V_28',\n particles = [ P.s__tilde__, P.s, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, L.FFV3 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_14})\n\nV_29 = Vertex(name = 'V_29',\n particles = [ P.b__tilde__, P.b, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, L.FFV3 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_14})\n\nV_30 = Vertex(name = 'V_30',\n particles = [ P.d__tilde__, P.u, P.W__minus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_31 = Vertex(name = 'V_31',\n particles = [ P.s__tilde__, P.c, 
P.W__minus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_32 = Vertex(name = 'V_32',\n particles = [ P.b__tilde__, P.t, P.W__minus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_33 = Vertex(name = 'V_33',\n particles = [ P.u__tilde__, P.d, P.W__plus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_34 = Vertex(name = 'V_34',\n particles = [ P.c__tilde__, P.s, P.W__plus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_35 = Vertex(name = 'V_35',\n particles = [ P.t__tilde__, P.b, P.W__plus__ ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_36 = Vertex(name = 'V_36',\n particles = [ P.u__tilde__, P.u, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_37 = Vertex(name = 'V_37',\n particles = [ P.c__tilde__, P.c, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_38 = Vertex(name = 'V_38',\n particles = [ P.t__tilde__, P.t, P.G ],\n color = [ 'T(3,2,1)' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GC_5})\n\nV_39 = Vertex(name = 'V_39',\n particles = [ P.tt__plus__, P.tt__minus__, P.H ],\n color = [ '1' ],\n lorentz = [ L.FFS1 ],\n couplings = {(0,0):C.GC_26})\n\nV_40 = Vertex(name = 'V_40',\n particles = [ P.t__tilde__, P.t, P.H ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFS1 ],\n couplings = {(0,0):C.GC_25})\n\nV_41 = Vertex(name = 'V_41',\n particles = [ P.e__plus__, P.e__minus__, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2, L.FFV4 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_15})\n\nV_42 = Vertex(name = 'V_42',\n particles = [ P.m__plus__, P.m__minus__, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2, L.FFV4 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_15})\n\nV_43 = Vertex(name = 'V_43',\n particles = [ P.tt__plus__, P.tt__minus__, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2, L.FFV4 ],\n couplings = {(0,0):C.GC_12,(0,1):C.GC_15})\n\nV_44 = Vertex(name = 'V_44',\n particles = [ P.e__plus__, P.ve, P.W__minus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_45 = Vertex(name = 'V_45',\n particles = [ P.m__plus__, P.vm, P.W__minus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_46 = Vertex(name = 'V_46',\n particles = [ P.tt__plus__, P.vt, P.W__minus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_47 = Vertex(name = 'V_47',\n particles = [ P.ve__tilde__, P.e__minus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_48 = Vertex(name = 'V_48',\n particles = [ P.vm__tilde__, P.m__minus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_49 = Vertex(name = 'V_49',\n particles = [ P.vt__tilde__, P.tt__minus__, P.W__plus__ ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_11})\n\nV_50 = Vertex(name = 'V_50',\n particles = [ P.u__tilde__, P.u, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, L.FFV5 ],\n couplings = {(0,0):C.GC_13,(0,1):C.GC_14})\n\nV_51 = Vertex(name = 'V_51',\n particles = [ P.c__tilde__, P.c, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, L.FFV5 ],\n couplings = {(0,0):C.GC_13,(0,1):C.GC_14})\n\nV_52 = Vertex(name = 'V_52',\n particles = [ P.t__tilde__, P.t, P.Z ],\n color = [ 'Identity(1,2)' ],\n lorentz = [ L.FFV2, 
L.FFV5 ],\n couplings = {(0,0):C.GC_13,(0,1):C.GC_14})\n\nV_53 = Vertex(name = 'V_53',\n particles = [ P.ve__tilde__, P.ve, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_19})\n\nV_54 = Vertex(name = 'V_54',\n particles = [ P.vm__tilde__, P.vm, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_19})\n\nV_55 = Vertex(name = 'V_55',\n particles = [ P.vt__tilde__, P.vt, P.Z ],\n color = [ '1' ],\n lorentz = [ L.FFV2 ],\n couplings = {(0,0):C.GC_19})\n\n#For gg>8g, uu~>8g and uu>uu6g\n\nV_56 = Vertex(name = 'V_56',\n particles = [ P.g21, P.g32, P.g13 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_57 = Vertex(name = 'V_57',\n particles = [ P.g21, P.g42, P.g14 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_58 = Vertex(name = 'V_58',\n particles = [ P.g21, P.g52, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_59 = Vertex(name = 'V_59',\n particles = [ P.g21, P.g62, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_60 = Vertex(name = 'V_60',\n particles = [ P.g21, P.g72, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_61 = Vertex(name = 'V_61',\n particles = [ P.g21, P.g82, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_62 = Vertex(name = 'V_62',\n particles = [ P.g31, P.g43, P.g14 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_63 = Vertex(name = 'V_63',\n particles = [ P.g41, P.g54, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_64 = Vertex(name = 'V_64',\n particles = [ P.g51, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_65 = Vertex(name = 'V_65',\n particles = [ P.g61, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_66 = Vertex(name = 'V_66',\n particles = [ P.g71, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_67 = Vertex(name = 'V_67',\n particles = [ P.g32, P.g43, P.g24 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_68 = Vertex(name = 'V_68',\n particles = [ P.g32, P.g53, P.g25 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_69 = Vertex(name = 'V_69',\n particles = [ P.g32, P.g63, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_70 = Vertex(name = 'V_70',\n particles = [ P.g32, P.g73, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_71 = Vertex(name = 'V_71',\n particles = [ P.g32, P.g83, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_72 = Vertex(name = 'V_72',\n particles = [ P.g42, P.g54, P.g25 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_73 = Vertex(name = 'V_73',\n particles = [ P.g52, P.g65, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_74 = Vertex(name = 'V_74',\n particles = [ P.g62, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_75 = Vertex(name = 'V_75',\n particles = [ P.g72, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_76 = Vertex(name = 'V_76',\n particles = [ P.g43, P.g54, P.g35 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = 
{(0,0):C.GC_4})\n\nV_77 = Vertex(name = 'V_77',\n particles = [ P.g43, P.g64, P.g36 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_78 = Vertex(name = 'V_78',\n particles = [ P.g43, P.g74, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_79 = Vertex(name = 'V_79',\n particles = [ P.g43, P.g84, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_80 = Vertex(name = 'V_80',\n particles = [ P.g53, P.g65, P.g36 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_81 = Vertex(name = 'V_81',\n particles = [ P.g63, P.g76, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_82 = Vertex(name = 'V_82',\n particles = [ P.g73, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_83 = Vertex(name = 'V_83',\n particles = [ P.g54, P.g65, P.g46 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_84 = Vertex(name = 'V_84',\n particles = [ P.g54, P.g75, P.g47 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_85 = Vertex(name = 'V_85',\n particles = [ P.g54, P.g85, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_86 = Vertex(name = 'V_86',\n particles = [ P.g64, P.g76, P.g47 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_87 = Vertex(name = 'V_87',\n particles = [ P.g74, P.g87, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_88 = Vertex(name = 'V_88',\n particles = [ P.g65, P.g76, P.g57 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_89 = Vertex(name = 'V_89',\n particles = [ P.g65, P.g86, P.g58 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_90 = Vertex(name = 'V_90',\n particles = [ P.g75, P.g87, P.g58 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_91 = Vertex(name = 'V_91',\n particles = [ P.g76, P.g87, P.g68 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\nV_92 = Vertex(name = 'V_92',\n particles = [ P.g87, P.g98, P.g79 ],\n color = [ '1' ],\n lorentz = [ L.VVV1 ],\n couplings = {(0,0):C.GC_4})\n\n# 4-point\n\nV_93 = Vertex(name = 'V_93',\n particles = [ P.g21, P.g32, P.g43, P.g14 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_94 = Vertex(name = 'V_94',\n particles = [ P.g21, P.g32, P.g53, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_95 = Vertex(name = 'V_95',\n particles = [ P.g21, P.g32, P.g63, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_96 = Vertex(name = 'V_96',\n particles = [ P.g21, P.g32, P.g73, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_97 = Vertex(name = 'V_97',\n particles = [ P.g21, P.g32, P.g83, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_98 = Vertex(name = 'V_98',\n particles = [ P.g21, P.g42, P.g54, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_99 = Vertex(name = 'V_99',\n particles = [ P.g21, P.g52, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_100 = Vertex(name = 'V_100',\n particles = [ P.g21, P.g62, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_101 = 
Vertex(name = 'V_101',\n particles = [ P.g21, P.g72, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_102 = Vertex(name = 'V_102',\n particles = [ P.g31, P.g43, P.g54, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_103 = Vertex(name = 'V_103',\n particles = [ P.g41, P.g54, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_104 = Vertex(name = 'V_104',\n particles = [ P.g51, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_105 = Vertex(name = 'V_105',\n particles = [ P.g61, P.g76, P.g87, P.g18],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_106 = Vertex(name = 'V_106',\n particles = [ P.g32, P.g43, P.g54, P.g25 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_107 = Vertex(name = 'V_107',\n particles = [ P.g32, P.g43, P.g64, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_108 = Vertex(name = 'V_108',\n particles = [ P.g32, P.g43, P.g74, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_109 = Vertex(name = 'V_109',\n particles = [ P.g32, P.g43, P.g84, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_110 = Vertex(name = 'V_110',\n particles = [ P.g32, P.g53, P.g65, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_111 = Vertex(name = 'V_111',\n particles = [ P.g32, P.g63, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_112 = Vertex(name = 'V_112',\n particles = [ P.g32, P.g73, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_113 = Vertex(name = 'V_113',\n particles = [ P.g42, P.g54, P.g65, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_114 = Vertex(name = 'V_114',\n particles = [ P.g52, P.g65, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_115 = Vertex(name = 'V_115',\n particles = [ P.g62, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_116 = Vertex(name = 'V_116',\n particles = [ P.g43, P.g54, P.g65, P.g36 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_117 = Vertex(name = 'V_117',\n particles = [ P.g43, P.g54, P.g75, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_118 = Vertex(name = 'V_118',\n particles = [ P.g43, P.g54, P.g85, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_119 = Vertex(name = 'V_119',\n particles = [ P.g43, P.g64, P.g76, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_120 = Vertex(name = 'V_120',\n particles = [ P.g43, P.g74, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_121 = Vertex(name = 'V_121',\n particles = [ P.g53, P.g65, P.g76, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_122 = Vertex(name = 'V_122',\n particles = [ P.g63, P.g76, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_123 = Vertex(name = 'V_123',\n particles = [ P.g54, P.g65, P.g76, P.g47 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_124 = Vertex(name = 'V_124',\n particles = 
[ P.g54, P.g65, P.g86, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_125 = Vertex(name = 'V_125',\n particles = [ P.g54, P.g75, P.g87, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_126 = Vertex(name = 'V_126',\n particles = [ P.g64, P.g76, P.g87, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_127 = Vertex(name = 'V_127',\n particles = [ P.g65, P.g76, P.g87, P.g58 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\nV_128 = Vertex(name = 'V_128',\n particles = [ P.g76, P.g87, P.g98, P.g69 ],\n color = [ '1' ],\n lorentz = [ L.GLUON4 ],\n couplings = {(0,0):C.G2})\n\n# 5-point\n\nV_129 = Vertex(name = 'V129',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_130 = Vertex(name = 'V130',\n particles = [ P.g21, P.g32, P.g43, P.g64, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_131 = Vertex(name = 'V131',\n particles = [ P.g21, P.g32, P.g43, P.g74, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_132 = Vertex(name = 'V132',\n particles = [ P.g21, P.g32, P.g43, P.g84, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_133 = Vertex(name = 'V133',\n particles = [ P.g21, P.g32, P.g53, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_134 = Vertex(name = 'V134',\n particles = [ P.g21, P.g32, P.g63, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_135 = Vertex(name = 'V135',\n particles = [ P.g21, P.g32, P.g73, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_136 = Vertex(name = 'V136',\n particles = [ P.g21, P.g42, P.g54, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_137 = Vertex(name = 'V137',\n particles = [ P.g21, P.g52, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_138 = Vertex(name = 'V138',\n particles = [ P.g21, P.g62, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_139 = Vertex(name = 'V139',\n particles = [ P.g31, P.g43, P.g54, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_140 = Vertex(name = 'V140',\n particles = [ P.g41, P.g54, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_141 = Vertex(name = 'V141',\n particles = [ P.g51, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_142 = Vertex(name = 'V142',\n particles = [ P.g32, P.g43, P.g54, P.g65, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_143 = Vertex(name = 'V143',\n particles = [ P.g32, P.g43, P.g54, P.g75, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_144 = Vertex(name = 'V144',\n particles = [ P.g32, P.g43, P.g54, P.g85, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_145 = Vertex(name = 'V145',\n particles = [ P.g32, P.g43, P.g64, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_146 = Vertex(name = 'V146',\n particles = [ P.g32, P.g43, P.g74, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ 
L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_147 = Vertex(name = 'V147',\n particles = [ P.g32, P.g53, P.g65, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_148 = Vertex(name = 'V148',\n particles = [ P.g32, P.g63, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_149 = Vertex(name = 'V149',\n particles = [ P.g42, P.g54, P.g65, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_150 = Vertex(name = 'V150',\n particles = [ P.g52, P.g65, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_151 = Vertex(name = 'V151',\n particles = [ P.g43, P.g54, P.g65, P.g76, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_152 = Vertex(name = 'V152',\n particles = [ P.g43, P.g54, P.g65, P.g86, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_153 = Vertex(name = 'V153',\n particles = [ P.g43, P.g54, P.g75, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_154 = Vertex(name = 'V154',\n particles = [ P.g43, P.g64, P.g76, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_155 = Vertex(name = 'V155',\n particles = [ P.g53, P.g65, P.g76, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_156 = Vertex(name = 'V156',\n particles = [ P.g54, P.g65, P.g76, P.g87, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\nV_157 = Vertex(name = 'V157',\n particles = [ P.g65, P.g76, P.g87, P.g98, P.g59 ],\n color = [ '1' ],\n lorentz = [ L.GLUON5 ],\n couplings = {(0,0):C.G2})\n\n# 6-point\n\nV_158 = Vertex(name = 'V158',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_159 = Vertex(name = 'V159',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g75, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_160 = Vertex(name = 'V160',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g85, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_161 = Vertex(name = 'V161',\n particles = [ P.g21, P.g32, P.g43, P.g64, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_162 = Vertex(name = 'V162',\n particles = [ P.g21, P.g32, P.g43, P.g74, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_163 = Vertex(name = 'V163',\n particles = [ P.g21, P.g32, P.g53, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_164 = Vertex(name = 'V164',\n particles = [ P.g21, P.g32, P.g63, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_165 = Vertex(name = 'V165',\n particles = [ P.g21, P.g42, P.g54, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_166 = Vertex(name = 'V166',\n particles = [ P.g21, P.g52, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_167 = Vertex(name = 'V167',\n particles = [ P.g31, P.g43, P.g54, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_168 = Vertex(name = 'V168',\n particles = [ P.g41, P.g54, P.g65, P.g76, P.g87, P.g18 
],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_169 = Vertex(name = 'V169',\n particles = [ P.g32, P.g43, P.g54, P.g65, P.g76, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_170 = Vertex(name = 'V170',\n particles = [ P.g32, P.g43, P.g54, P.g65, P.g86, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_171 = Vertex(name = 'V171',\n particles = [ P.g32, P.g43, P.g54, P.g75, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_172 = Vertex(name = 'V172',\n particles = [ P.g32, P.g43, P.g64, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_173 = Vertex(name = 'V173',\n particles = [ P.g32, P.g53, P.g65, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_174 = Vertex(name = 'V174',\n particles = [ P.g42, P.g54, P.g65, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_175 = Vertex(name = 'V175',\n particles = [ P.g43, P.g54, P.g65, P.g76, P.g87, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\nV_176 = Vertex(name = 'V176',\n particles = [ P.g54, P.g65, P.g76, P.g87, P.g98, P.g49 ],\n color = [ '1' ],\n lorentz = [ L.GLUON6 ],\n couplings = {(0,0):C.G2})\n\n# 7-point\n\nV_177 = Vertex(name = 'V177',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g76, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_178 = Vertex(name = 'V178',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g86, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_179 = Vertex(name = 'V179',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g75, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_180 = Vertex(name = 'V180',\n particles = [ P.g21, P.g32, P.g43, P.g64, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_181 = Vertex(name = 'V181',\n particles = [ P.g21, P.g32, P.g53, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_182 = Vertex(name = 'V182',\n particles = [ P.g21, P.g42, P.g54, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_183 = Vertex(name = 'V183',\n particles = [ P.g31, P.g43, P.g54, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_184 = Vertex(name = 'V184',\n particles = [ P.g32, P.g43, P.g54, P.g65, P.g76, P.g87, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\nV_185 = Vertex(name = 'V185',\n particles = [ P.g43, P.g54, P.g65, P.g76, P.g87, P.g98, P.g39 ],\n color = [ '1' ],\n lorentz = [ L.GLUON7 ],\n couplings = {(0,0):C.G2})\n\n# 8-point\n\nV_186 = Vertex(name = 'V186',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g76, P.g87, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.GLUON8 ],\n couplings = {(0,0):C.G2})\n\nV_187 = Vertex(name = 'V187',\n particles = [ P.g32, P.g43, P.g54, P.g65, P.g76, P.g87, P.g98, P.g29 ],\n color = [ '1' ],\n lorentz = [ L.GLUON8 ],\n couplings = {(0,0):C.G2})\n\n# 9-point\n\nV_188 = Vertex(name = 'V188',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g76, P.g87, P.g98, P.g19 ],\n color = [ '1' ],\n lorentz = [ L.GLUON9 ],\n couplings = {(0,0):C.G2})\n\n# 
10-point\n\nV_189 = Vertex(name = 'V189',\n particles = [ P.g21, P.g32, P.g43, P.g54, P.g65, P.g76, P.g87, P.g98, P.ga9, P.g1a ],\n color = [ '1' ],\n lorentz = [ L.GLUON10 ],\n couplings = {(0,0):C.G2})\n\n# qqg\n\nV_201 = Vertex(name = 'V_201',\n particles = [ P.u3__tilde__, P.u1, P.g13 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_202 = Vertex(name = 'V_202',\n particles = [ P.u1__tilde__, P.u3, P.g13 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_203 = Vertex(name = 'V_203',\n particles = [ P.u4__tilde__, P.u1, P.g14 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_204 = Vertex(name = 'V_204',\n particles = [ P.u1__tilde__, P.u4, P.g14 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_205 = Vertex(name = 'V_205',\n particles = [ P.u5__tilde__, P.u1, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_206 = Vertex(name = 'V_206',\n particles = [ P.u1__tilde__, P.u5, P.g15 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_207 = Vertex(name = 'V_207',\n particles = [ P.u6__tilde__, P.u1, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_208 = Vertex(name = 'V_208',\n particles = [ P.u1__tilde__, P.u6, P.g16 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_209 = Vertex(name = 'V_209',\n particles = [ P.u7__tilde__, P.u1, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_210 = Vertex(name = 'V_210',\n particles = [ P.u1__tilde__, P.u7, P.g17 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_211 = Vertex(name = 'V_211',\n particles = [ P.u8__tilde__, P.u1, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_212 = Vertex(name = 'V_212',\n particles = [ P.u1__tilde__, P.u8, P.g18 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_213 = Vertex(name = 'V_213',\n particles = [ P.u9__tilde__, P.u1, P.g19 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_214 = Vertex(name = 'V_214',\n particles = [ P.u1__tilde__, P.u9, P.g19 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_215 = Vertex(name = 'V_215',\n particles = [ P.u1__tilde__, P.u2, P.g21 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_216 = Vertex(name = 'V_216',\n particles = [ P.u2__tilde__, P.u1, P.g21 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_217 = Vertex(name = 'V_217',\n particles = [ P.u4__tilde__, P.u2, P.g24 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_218 = Vertex(name = 'V_218',\n particles = [ P.u2__tilde__, P.u4, P.g24 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_219 = Vertex(name = 'V_219',\n particles = [ P.u5__tilde__, P.u2, P.g25 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_220 = Vertex(name = 'V_220',\n particles = [ P.u2__tilde__, P.u5, P.g25 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_221 = Vertex(name = 'V_221',\n particles = [ P.u6__tilde__, P.u2, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_222 = Vertex(name = 'V_222',\n particles = [ P.u2__tilde__, P.u6, P.g26 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = 
{(0,0):C.GG2})\n\nV_223 = Vertex(name = 'V_223',\n particles = [ P.u7__tilde__, P.u2, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_224 = Vertex(name = 'V_224',\n particles = [ P.u2__tilde__, P.u7, P.g27 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_225 = Vertex(name = 'V_225',\n particles = [ P.u8__tilde__, P.u2, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_226 = Vertex(name = 'V_226',\n particles = [ P.u2__tilde__, P.u8, P.g28 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_227 = Vertex(name = 'V_227',\n particles = [ P.u9__tilde__, P.u2, P.g29 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_228 = Vertex(name = 'V_228',\n particles = [ P.u2__tilde__, P.u9, P.g29 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\n\nV_229 = Vertex(name = 'V_229',\n particles = [ P.u3__tilde__, P.u1, P.g31 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_230 = Vertex(name = 'V_230',\n particles = [ P.u2__tilde__, P.u3, P.g32 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_231 = Vertex(name = 'V_231',\n particles = [ P.u3__tilde__, P.u2, P.g32 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_232 = Vertex(name = 'V_232',\n particles = [ P.u5__tilde__, P.u3, P.g35 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_233 = Vertex(name = 'V_233',\n particles = [ P.u3__tilde__, P.u5, P.g35 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_234 = Vertex(name = 'V_234',\n particles = [ P.u6__tilde__, P.u3, P.g36 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_235 = Vertex(name = 'V_235',\n particles = [ P.u3__tilde__, P.u6, P.g36 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_236 = Vertex(name = 'V_236',\n particles = [ P.u7__tilde__, P.u3, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_237 = Vertex(name = 'V_237',\n particles = [ P.u3__tilde__, P.u7, P.g37 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_238 = Vertex(name = 'V_238',\n particles = [ P.u8__tilde__, P.u3, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_239 = Vertex(name = 'V_239',\n particles = [ P.u3__tilde__, P.u8, P.g38 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_240 = Vertex(name = 'V_240',\n particles = [ P.u9__tilde__, P.u3, P.g39 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_241 = Vertex(name = 'V_241',\n particles = [ P.u3__tilde__, P.u9, P.g39 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_242 = Vertex(name = 'V_242',\n particles = [ P.u4__tilde__, P.u1, P.g41 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_243 = Vertex(name = 'V_243',\n particles = [ P.u4__tilde__, P.u2, P.g42 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_244 = Vertex(name = 'V_244',\n particles = [ P.u3__tilde__, P.u4, P.g43 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_245 = Vertex(name = 'V_245',\n particles = [ P.u4__tilde__, P.u3, P.g43 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_246 = Vertex(name = 'V_246',\n 
particles = [ P.u6__tilde__, P.u4, P.g46 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_247 = Vertex(name = 'V_247',\n particles = [ P.u4__tilde__, P.u6, P.g46 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_248 = Vertex(name = 'V_248',\n particles = [ P.u7__tilde__, P.u4, P.g47 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_249 = Vertex(name = 'V_249',\n particles = [ P.u4__tilde__, P.u7, P.g47 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_251 = Vertex(name = 'V_251',\n particles = [ P.u8__tilde__, P.u4, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_252 = Vertex(name = 'V_252',\n particles = [ P.u4__tilde__, P.u8, P.g48 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_253 = Vertex(name = 'V_253',\n particles = [ P.u9__tilde__, P.u4, P.g49 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_254 = Vertex(name = 'V_254',\n particles = [ P.u4__tilde__, P.u9, P.g49 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_255 = Vertex(name = 'V_255',\n particles = [ P.u5__tilde__, P.u1, P.g51 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_256 = Vertex(name = 'V_256',\n particles = [ P.u5__tilde__, P.u2, P.g52 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_257 = Vertex(name = 'V_257',\n particles = [ P.u5__tilde__, P.u3, P.g53 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_258 = Vertex(name = 'V_258',\n particles = [ P.u4__tilde__, P.u5, P.g54 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_259 = Vertex(name = 'V_259',\n particles = [ P.u5__tilde__, P.u4, P.g54 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_260 = Vertex(name = 'V_260',\n particles = [ P.u7__tilde__, P.u5, P.g57 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_261 = Vertex(name = 'V_261',\n particles = [ P.u5__tilde__, P.u7, P.g57 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_262 = Vertex(name = 'V_262',\n particles = [ P.u8__tilde__, P.u5, P.g58 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_263 = Vertex(name = 'V_263',\n particles = [ P.u5__tilde__, P.u8, P.g58 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_264 = Vertex(name = 'V_264',\n particles = [ P.u9__tilde__, P.u5, P.g59 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_265 = Vertex(name = 'V_265',\n particles = [ P.u5__tilde__, P.u9, P.g59 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_266 = Vertex(name = 'V_266',\n particles = [ P.u6__tilde__, P.u1, P.g61 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_267 = Vertex(name = 'V_267',\n particles = [ P.u6__tilde__, P.u2, P.g62 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_268 = Vertex(name = 'V_268',\n particles = [ P.u6__tilde__, P.u3, P.g63 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_269 = Vertex(name = 'V_269',\n particles = [ P.u6__tilde__, P.u4, P.g64 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_270 = Vertex(name = 'V_270',\n particles = [ P.u5__tilde__, P.u6, P.g65 ],\n color = [ 
'1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_271 = Vertex(name = 'V_271',\n particles = [ P.u6__tilde__, P.u5, P.g65 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_272 = Vertex(name = 'V_272',\n particles = [ P.u8__tilde__, P.u6, P.g68 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_273 = Vertex(name = 'V_273',\n particles = [ P.u6__tilde__, P.u8, P.g68 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_274 = Vertex(name = 'V_274',\n particles = [ P.u9__tilde__, P.u6, P.g69 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_275 = Vertex(name = 'V_275',\n particles = [ P.u6__tilde__, P.u9, P.g69 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_276 = Vertex(name = 'V_276',\n particles = [ P.u7__tilde__, P.u1, P.g71 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_277 = Vertex(name = 'V_277',\n particles = [ P.u7__tilde__, P.u2, P.g72 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_278 = Vertex(name = 'V_278',\n particles = [ P.u7__tilde__, P.u3, P.g73 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_279 = Vertex(name = 'V_279',\n particles = [ P.u7__tilde__, P.u4, P.g74 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_280 = Vertex(name = 'V_280',\n particles = [ P.u7__tilde__, P.u5, P.g75 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_281 = Vertex(name = 'V_281',\n particles = [ P.u6__tilde__, P.u7, P.g76 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_282 = Vertex(name = 'V_282',\n particles = [ P.u7__tilde__, P.u6, P.g76 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_283 = Vertex(name = 'V_283',\n particles = [ P.u9__tilde__, P.u7, P.g79 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_284 = Vertex(name = 'V_284',\n particles = [ P.u7__tilde__, P.u9, P.g79 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_285 = Vertex(name = 'V_285',\n particles = [ P.u8__tilde__, P.u1, P.g81 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_286 = Vertex(name = 'V_286',\n particles = [ P.u8__tilde__, P.u2, P.g82 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_287 = Vertex(name = 'V_287',\n particles = [ P.u8__tilde__, P.u3, P.g83 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_288 = Vertex(name = 'V_288',\n particles = [ P.u8__tilde__, P.u4, P.g84 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_289 = Vertex(name = 'V_289',\n particles = [ P.u8__tilde__, P.u5, P.g85 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_290 = Vertex(name = 'V_290',\n particles = [ P.u8__tilde__, P.u6, P.g86 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_291 = Vertex(name = 'V_291',\n particles = [ P.u7__tilde__, P.u8, P.g87 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_292 = Vertex(name = 'V_292',\n particles = [ P.u8__tilde__, P.u7, P.g87 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_293 = Vertex(name = 'V_293',\n particles = [ P.u9__tilde__, P.u1, P.g91 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = 
{(0,0):C.GG2})\n\nV_294 = Vertex(name = 'V_294',\n particles = [ P.u9__tilde__, P.u2, P.g92 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_295 = Vertex(name = 'V_295',\n particles = [ P.u9__tilde__, P.u3, P.g93 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_296 = Vertex(name = 'V_296',\n particles = [ P.u9__tilde__, P.u4, P.g94 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_297 = Vertex(name = 'V_297',\n particles = [ P.u9__tilde__, P.u5, P.g95 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_298 = Vertex(name = 'V_298',\n particles = [ P.u9__tilde__, P.u6, P.g96 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_299 = Vertex(name = 'V_299',\n particles = [ P.u9__tilde__, P.u7, P.g97 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_300 = Vertex(name = 'V_300',\n particles = [ P.u8__tilde__, P.u9, P.g98 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_301 = Vertex(name = 'V_301',\n particles = [ P.u9__tilde__, P.u8, P.g98 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG2})\n\nV_401 = Vertex(name = 'V_401',\n particles = [ P.u1__tilde__, P.u1, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_402 = Vertex(name = 'V_402',\n particles = [ P.u2__tilde__, P.u2, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_403 = Vertex(name = 'V_403',\n particles = [ P.u3__tilde__, P.u3, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_404 = Vertex(name = 'V_404',\n particles = [ P.u4__tilde__, P.u4, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_405 = Vertex(name = 'V_405',\n particles = [ P.u5__tilde__, P.u5, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_406 = Vertex(name = 'V_406',\n particles = [ P.u6__tilde__, P.u6, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_407 = Vertex(name = 'V_407',\n particles = [ P.u7__tilde__, P.u7, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_408 = Vertex(name = 'V_408',\n particles = [ P.u8__tilde__, P.u8, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\nV_409 = Vertex(name = 'V_409',\n particles = [ P.u9__tilde__, P.u9, P.g0 ],\n color = [ '1' ],\n lorentz = [ L.FFV1 ],\n couplings = {(0,0):C.GG0})\n\n\n" } ]
2
KristineMirzoyan/Medical-Care
https://github.com/KristineMirzoyan/Medical-Care
d73966f594787caaf3afa6f4944436c8864afc71
c09cc453e96bf493b7ab6cbc9f024200fd4166de
79ea957c3d15140c6568cc488dab812d25c445e0
refs/heads/master
2020-03-17T15:34:57.838378
2018-05-16T20:55:31
2018-05-16T20:55:31
133,716,046
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.765389084815979, "alphanum_fraction": 0.765389084815979, "avg_line_length": 29.75, "blob_id": "0e122fba78744974a1477d78cd9bd53d01b8e19f", "content_id": "3261a84569486f5e76b22698ced1b233b809eb32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 861, "license_type": "no_license", "max_line_length": 86, "num_lines": 28, "path": "/Med.main.py", "repo_name": "KristineMirzoyan/Medical-Care", "src_encoding": "UTF-8", "text": "import json\n\n#function: that open your json file, hospital_data\n\n#function:\n#print list of diseases\n#take user inputted disease (this should be done in main function)\n#print symptoms of the disease and ask user if they have them\n#if user says yes,\n#check each hospital and see which hospitals can treat that disease\n#next, check the hospitals that can treat the disease, and check if they have\n#the diagnosis tools needed for the disease\n#if the hospital has both the disease treatment and the diagnosis tools, print\n#the hospital information\n\ndef main():\n\twith open(\"Med_Care.json\") as \"data _ file\":\n\tcompare data\n return\n\n\n\ndef main():\n #run the function that opens hospital_data\n #get disease from user (make sure to print list of diseases for them first)\n #run function that takes user input and outputs diagnosis and hospital information\n\nmain()\n" } ]
1
NehaDShakya/ML-and-DL-Streamlit-Demo-App
https://github.com/NehaDShakya/ML-and-DL-Streamlit-Demo-App
3cedeb422f7c9b7f0a417e677c763b7227c23c16
292dd2e9a5c9773910edee4ab34637bc59c3c599
275b6ffb3f740f9fda485bb2fd88b77d19730421
refs/heads/master
2023-08-13T02:50:56.319894
2021-10-13T06:35:05
2021-10-13T06:35:05
413,557,778
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6546419262886047, "alphanum_fraction": 0.6647214889526367, "avg_line_length": 32.66071319580078, "blob_id": "f479112920b73185c980b1dc6bd31992eaa332ca", "content_id": "6226c24afed9cccdab10947e78ad5b569486ccf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3770, "license_type": "no_license", "max_line_length": 184, "num_lines": 112, "path": "/pages/ml/tp_02.py", "repo_name": "NehaDShakya/ML-and-DL-Streamlit-Demo-App", "src_encoding": "UTF-8", "text": "# import required libraries\nimport math\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nfrom sklearn.linear_model import LinearRegression\nfrom word2number import w2n\n\n\ndef app():\n # Add Page Title\n st.title(\"(ML) TP-02 : Linear Regression Multiple Variable\")\n\n st.write(\n \"In this TD we try and predict the price of house given its area, number of bedrooms and age. We also plot the linear regrssion.\"\n )\n\n # import the data sets\n df_home_prices = pd.read_csv(\"data/ml/tp_02/homeprices.csv\")\n\n st.write(\"We have a dataset of house prices that we use to train our model:\")\n # printing the prices dataset\n st.write(df_home_prices.head(10))\n\n # Data Cleaning\n st.write(\"We check if there is any missing values:\")\n st.write(df_home_prices.isnull().sum())\n\n st.write(\n \"Since we have missing values in the bedroom column we fill them with the median:\"\n )\n # Calculate medain bedrooms\n median_bedroom = math.floor(df_home_prices.bedrooms.median())\n st.write(\"Median bedroom number:\", median_bedroom)\n\n # Fill missing bedroom data with median\n df_home_prices[\"bedrooms\"] = df_home_prices[\"bedrooms\"].fillna(median_bedroom)\n\n # printing the prices dataset\n st.write(df_home_prices.head(10))\n st.write(df_home_prices.isnull().sum())\n\n st.write(\n \"Now we create and fit a linear regression model and check model coefficent and intercept: \"\n )\n\n # Create and fit the linear regression model\n model_home_prices = LinearRegression()\n model_home_prices.fit(\n df_home_prices[[\"area\", \"bedrooms\", \"age\"]], df_home_prices[\"price\"]\n )\n\n # Check model coefficent and intercept\n st.write(\"Coefficient: \", model_home_prices.coef_[0])\n st.write(\"Intercept: \", model_home_prices.intercept_)\n\n st.header(\"Exercise\")\n\n st.write(\n \"In this exercise we predict the salary given the experience, test score (out of 10) and interview score (out of 10). 
We also check the coefficient and intercept of the model.\"\n )\n\n # Read training data csv files\n df_hiring = pd.read_csv(\"data/ml/tp_02/hiring.csv\")\n st.write(df_hiring.head(10))\n\n # Data Cleaning\n st.write(\"We check if there is any missing values:\")\n st.write(df_hiring.isnull().sum())\n\n st.markdown(\n \"\"\"\n Since we have ;issing values we do the following:\n - Fill missing experience with 'zero'.\n - Fill missing test_score(out of 10) data with mean test_score.\n \n A linear model does not take text data so we convert experience data from string to integer (using word2number library).\n \"\"\"\n )\n # Fill missing experience with \"zero\"\n df_hiring[\"experience\"] = df_hiring[\"experience\"].fillna(\"zero\")\n\n # Calculate mean test_score\n mean_test_score = math.floor(df_hiring[\"test_score(out of 10)\"].mean())\n st.write(\"Median test score:\", mean_test_score)\n # Fill missing test_score(out of 10) data with mean test_score\n df_hiring[\"test_score(out of 10)\"] = df_hiring[\"test_score(out of 10)\"].fillna(\n mean_test_score\n )\n\n # Change experience data from word to number\n df_hiring[\"experience\"] = df_hiring[\"experience\"].apply(w2n.word_to_num)\n\n st.write(df_hiring.head(10))\n st.write(df_hiring.isnull().sum())\n\n # Create Linear regression object\n model_hiring = LinearRegression()\n\n # Train the model using training data\n model_hiring.fit(\n df_hiring[\n [\"experience\", \"test_score(out of 10)\", \"interview_score(out of 10)\"]\n ],\n df_hiring[\"salary($)\"],\n )\n\n # Check model coefficent and intercept\n st.write(\"Coefficient: \", model_hiring.coef_[0])\n st.write(\"Intercept: \", model_hiring.intercept_)\n" }, { "alpha_fraction": 0.62745600938797, "alphanum_fraction": 0.6348558068275452, "avg_line_length": 31.12295150756836, "blob_id": "82693b93037b537e8eabcfb433bdb3c8990c1d36", "content_id": "2de33d26989c2b7d65ca0888d04a8153c186ab31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3919, "license_type": "no_license", "max_line_length": 158, "num_lines": 122, "path": "/pages/ml/tp_01.py", "repo_name": "NehaDShakya/ML-and-DL-Streamlit-Demo-App", "src_encoding": "UTF-8", "text": "# import required libraries\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport streamlit as st\nfrom sklearn.linear_model import LinearRegression\n\n\ndef app():\n # Add Page Title\n st.title(\"(ML) TP-01 : Linear Regression Single Variable\")\n \n st.write(\n \"In this TD we are taking alook at single variable linear Regression.\"\n )\n\n st.header(\"Example\")\n st.write(\n \"In this part we try and predict the price of house given its area. We also plot the linear regrssion.\"\n )\n\n # import the data sets\n df_prices = pd.read_csv(\"data/ml/tp_01/homeprices.csv\")\n df_areas = pd.read_csv(\"data/ml/tp_01/areas.csv\")\n\n st.write(\"We have a dataset of house prices that we use to train our model:\")\n # printing the prices dataset\n st.write(df_prices.head())\n\n st.write(\n \"We also have a dataset with area of houses for which we need to predict the price:\"\n )\n # printing the areas dataset\n st.write(df_areas.head())\n\n st.write(\n \"First we make a scatter plot for the area and price from the first dataset:\"\n )\n # Creating scatter plot\n fig_prices_01 = plt.figure()\n plt.scatter(x=df_prices[\"area\"], y=df_prices[\"price\"], c=\"red\", marker=\"+\")\n plt.xlabel(\"Area (sq. 
feet)\")\n plt.ylabel(\"Prices\")\n # plt.plot(df_prices[\"area\"], df_prices[\"price\"], color=\"blue\")\n\n # Display the plot\n st.write(fig_prices_01)\n\n st.write(\"Now we predict the prices for the houses in the second dataset:\")\n\n # Create and fit the linear regression model\n model_prices = LinearRegression()\n model_prices.fit(df_prices[[\"area\"]], df_prices[\"price\"])\n\n # Check model coefficent and intercept\n st.write(\"Coefficient: \", np.round(model_prices.coef_[0], 2))\n st.write(\"Intercept: \", np.round(model_prices.intercept_, 2))\n\n fig_prices_02 = plt.figure()\n plt.scatter(x=df_prices[\"area\"], y=df_prices[\"price\"], c=\"red\", marker=\"+\")\n plt.xlabel(\"Area (sq. feet)\")\n plt.ylabel(\"Prices\")\n plt.plot(df_prices[\"area\"], model_prices.predict(df_prices[[\"area\"]]), color=\"blue\")\n\n st.write(fig_prices_02)\n\n df_areas[\"price\"] = model_prices.predict(df_areas[[\"area\"]])\n st.write(df_areas.head())\n\n st.header(\"Exercise\")\n\n st.write(\n \"In this exercise we fit and graph the linear regression model for the per capita income dataset. We also check the slope and intercept of the model.\"\n )\n\n # Read training data csv files\n df_canada_pci = pd.read_csv(\"data/ml/tp_01/canada_per_capita_income.csv\")\n st.write(df_canada_pci.head())\n\n st.write(\"First we make a scatter plot for the per capita incomein USD:\")\n # Creating scatter plot\n fig_canada_pci_01 = plt.figure()\n plt.scatter(\n x=df_canada_pci[\"year\"],\n y=df_canada_pci[\"per capita income (US$)\"],\n c=\"red\",\n marker=\"+\",\n )\n plt.xlabel(\"Year\")\n plt.ylabel(\"Per Capita Income (US$)\")\n # plt.plot(df_canada_pci[\"year\"], df_canada_pci[\"per capita income (US$)\"], color = \"blue\")\n st.write(fig_canada_pci_01)\n\n # Create Linear regression object\n model_canada_pci = LinearRegression()\n\n # Train the model using training data\n model_canada_pci.fit(\n df_canada_pci[[\"year\"]], df_canada_pci[\"per capita income (US$)\"]\n )\n\n # Check model coefficent and intercept\n st.write(\"Coefficient: \", model_canada_pci.coef_[0])\n st.write(\"Intercept: \", np.round(model_canada_pci.intercept_, 2))\n\n # Adding linear regression to scatter plot\n fig_canada_pci_02 = plt.figure()\n plt.scatter(\n x=df_canada_pci[\"year\"],\n y=df_canada_pci[\"per capita income (US$)\"],\n c=\"red\",\n marker=\"+\",\n )\n plt.xlabel(\"Year\")\n plt.ylabel(\"Per Capita Income (US$)\")\n plt.plot(\n df_canada_pci[\"year\"],\n model_canada_pci.predict(df_canada_pci[[\"year\"]]),\n color=\"blue\",\n )\n\n st.write(fig_canada_pci_02)\n" }, { "alpha_fraction": 0.699009895324707, "alphanum_fraction": 0.7227723002433777, "avg_line_length": 25.578947067260742, "blob_id": "5385ac25b240bf3130d1f26b0e92974bc8f8726f", "content_id": "ffc2f636740b10cfe532cba37ac4d4fac17b522e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 98, "num_lines": 19, "path": "/app.py", "repo_name": "NehaDShakya/ML-and-DL-Streamlit-Demo-App", "src_encoding": "UTF-8", "text": "import streamlit as st\nfrom multiapp import MultiApp\nfrom pages.ml import tp_01, tp_02 # import your app modules here\n\napp = MultiApp()\n\nst.markdown(\n \"\"\"\n# ML and DL Streamlit Demo App\nThis app is compilation of the TPs I did in my Intro to Machine Learning and Deep Learning Course.\n\"\"\"\n)\n\n# Add all your application here\napp.add_app(\"(ML) TP 01 : Linear Regression Single Variable\", tp_01.app)\napp.add_app(\"(ML) TP 02 : 
Linear Regression Multiple Variables\", tp_02.app)\n\n# The main app\napp.run()\n" }, { "alpha_fraction": 0.5496688485145569, "alphanum_fraction": 0.7284768223762512, "avg_line_length": 18, "blob_id": "e340d75578a629d54418ce515a132cac0bd3e0f7", "content_id": "6ea0bd208426a953e5583443f9fe4b864236357a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 151, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/requirements.txt", "repo_name": "NehaDShakya/ML-and-DL-Streamlit-Demo-App", "src_encoding": "UTF-8", "text": "matplotlib==3.4.2\nmatplotlib-inline==0.1.2\nnumpy==1.19.1\npandas==1.2.4\npandas-datareader==0.10.0\nscikit-learn==0.24.2\nstreamlit==1.0.0\nword2number==1.1" }, { "alpha_fraction": 0.7373448014259338, "alphanum_fraction": 0.7411652207374573, "avg_line_length": 19.134614944458008, "blob_id": "83ee14dbe774a49fbc5bd0f36658c803275dd6b4", "content_id": "6426b2a02cec329fb15b1fa0149aef2f57d8e3e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1047, "license_type": "no_license", "max_line_length": 148, "num_lines": 52, "path": "/README.md", "repo_name": "NehaDShakya/ML-and-DL-Streamlit-Demo-App", "src_encoding": "UTF-8", "text": "# Machine-Learning-Streamlit-App\n\nThis app is compilation of the TPs I did in my Intro to Machine Learning and Deep Learning Course.\n\n## Demo\n\nLaunch the web app:\n\n[[Streamlit App]](https://ml-and-dl-streamlit-demo-app.herokuapp.com/)\n\n## Reproducing this web app\n\nTo recreate this web app on your own computer, do the following.\n\n### Create conda environment\n\nFirstly, we will create a conda environment called *ml_dl_demo*\n\n```\nconda create -n ml_dl_demo python=3.8.11\n```\n\nSecondly, we will login to the *multipage* environement\n\n```\nconda activate ml_dl_demo\n```\n\n### Install prerequisite libraries\n\nDownload requirements.txt file\n\n```\nwget https://raw.githubusercontent.com/NehaDShakya/ML-and-DL-Streamlit-Demo-App/master/requirements.txt\n\n```\n\nPip install libraries\n\n```\npip install -r requirements.txt\n```\n\n### Download and unzip this repo\n\nDownload [this repo](https://github.com/NehaDShakya/ML-and-DL-Streamlit-Demo-App/archive/refs/heads/master.zip) and unzip as your working directory.\n\n### Launch the app\n\n```\nstreamlit run app.py\n```\n" } ]
5
techmatt/Solaris
https://github.com/techmatt/Solaris
b5579dd1681a8e030a45b2be61683aeca2de13ff
5017e6e014c804ffe570024e37115d333753e57d
4302bcdbaed17fc6210307a0f14988ecd2411a8d
refs/heads/main
2023-07-28T02:39:44.761597
2021-09-12T04:19:29
2021-09-12T04:19:29
384,784,657
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5956568121910095, "alphanum_fraction": 0.6216662526130676, "avg_line_length": 36.61180114746094, "blob_id": "74475e67053052066bc3d27abbdefad6fa7a9e3c", "content_id": "f394a7ededaf6842b829a42ba59112d57dfb0d0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12111, "license_type": "no_license", "max_line_length": 291, "num_lines": 322, "path": "/makeSpells.py", "repo_name": "techmatt/Solaris", "src_encoding": "UTF-8", "text": "\nimport os\nfrom functools import cmp_to_key\nfrom PIL import Image, ImageFont, ImageDraw\nfrom fpdf import FPDF\nimport numpy as np\nfrom math import pow\n\nimport util\nimport cv2\n\n#full image: (666,906)\n#half image: (666, 453)\n#half image: (2664, 1812)\n\ndef scoreSpell(a, weights):\n return a['r'] * pow(10.0, weights[0]) + \\\n a['y'] * pow(10.0, weights[1]) + \\\n a['g'] * pow(10.0, weights[2]) + \\\n a['b'] * pow(10.0, weights[3]) + \\\n a['v'] * pow(10.0, weights[4]) + \\\n a['w'] * pow(10.0, weights[5]) + \\\n a['gray'] * pow(10.0, weights[6])\n\ndef anySort(a, b, weights):\n aScore = scoreSpell(a, weights)\n bScore = scoreSpell(b, weights)\n if aScore > bScore:\n return 1\n elif aScore == bScore:\n return 0\n else:\n return -1\n\ndef rSort(a, b):\n return anySort(a, b, [7, 6, 5, 4, 3, 2, 1])\n\ndef ySort(a, b):\n return anySort(a, b, [6, 7, 5, 4, 3, 2, 1])\n\ndef gSort(a, b):\n return anySort(a, b, [5, 6, 7, 4, 3, 2, 1])\n\ndef bSort(a, b):\n return anySort(a, b, [4, 6, 5, 7, 3, 2, 1])\n\ndef vSort(a, b):\n return anySort(a, b, [3, 6, 5, 4, 7, 2, 1])\n\ndef wSort(a, b):\n return anySort(a, b, [2, 6, 5, 4, 3, 7, 1])\n\ndef getCompareKey(s):\n if s == 'r':\n return cmp_to_key(rSort)\n elif s == 'y':\n return cmp_to_key(ySort)\n elif s == 'g':\n return cmp_to_key(gSort)\n elif s == 'b':\n return cmp_to_key(bSort)\n elif s == 'v':\n return cmp_to_key(vSort)\n elif s == 'w':\n return cmp_to_key(wSort)\n return None\n\nclass Opt:\n def __init__(self):\n self.fontTitle = ImageFont.truetype(\"fonts/CenturyGothicBold.ttf\", 200)\n self.fontDesc = ImageFont.truetype(\"fonts/futura.ttf\", 80)\n self.bookTitle = ImageFont.truetype(\"fonts/FlareGothic.ttf\", 200)\n self.fontPage = ImageFont.truetype(\"fonts/CenturyGothicBold.ttf\", 140)\n self.fontAnom = ImageFont.truetype(\"fonts/futura.ttf\", 80)\n self.pageWidth = 2664\n self.pageHeight = 1812\n self.gemImgs = {}\n self.gemImgSize = [200, 200]\n self.gemImgs['r'] = util.loadImg('images/red.png', self.gemImgSize)\n self.gemImgs['y'] = util.loadImg('images/yellow.png', self.gemImgSize)\n self.gemImgs['g'] = util.loadImg('images/green.png', self.gemImgSize)\n self.gemImgs['b'] = util.loadImg('images/blue.png', self.gemImgSize)\n self.gemImgs['v'] = util.loadImg('images/violet.png', self.gemImgSize)\n self.gemImgs['gray'] = util.loadImg('images/gray.png', self.gemImgSize)\n self.gemImgs['w'] = util.loadImg('images/white.png', self.gemImgSize)\n\nclass SpellList:\n def __init__(self, filename):\n self.spells = []\n self.spellbooks = {}\n\n spellbookChars = ['r', 'y', 'g', 'b', 'v', 'w']\n for c in spellbookChars:\n self.spellbooks[c] = []\n \n with open(filename) as f:\n spellLines = f.readlines()\n spellLines = [x.strip() for x in spellLines]\n for line in spellLines:\n spell = self.makeSpell(line)\n self.spells.append(spell)\n for c in spellbookChars:\n if spell[c] > 0:\n self.spellbooks[c].append(spell)\n\n for c in spellbookChars:\n self.spellbooks[c].sort(key=getCompareKey(c))\n\n #self.spells.append(self.makeSpell())\n \n def 
makeSpell(self, line):\n line = line.replace(' ', ' ')\n parts = line.split('\\t')\n if len(parts) != 9:\n print('unexpected line: ' + line)\n return None\n \n def readInt(s):\n if len(s) == 0:\n return 0\n else:\n return int(s)\n result = {}\n result['name'] = parts[0].title()\n result['gray'] = readInt(parts[1])\n result['r'] = readInt(parts[2])\n result['y'] = readInt(parts[3])\n result['g'] = readInt(parts[4])\n result['b'] = readInt(parts[5])\n result['v'] = readInt(parts[6])\n result['w'] = readInt(parts[7])\n result['desc'] = parts[8].replace(' ', ' ')\n return result\n\ndef resizeImgStd(img, newSize):\n result = cv2.resize(img, dsize=(newSize[1], newSize[0]), interpolation=cv2.INTER_LANCZOS4) #cv2.INTER_AREA\n result = np.clip(result, 0.0, 1.0)\n return result\n\ndef makeSpellImg(opt, spell, fullWidth, height, leftPad, rightPad):\n allImgs = []\n for gem in ['w', 'r', 'y', 'g', 'b', 'v', 'gray']:\n for x in range(0, spell[gem]):\n gemImg = opt.gemImgs[gem]\n gemImg = resizeImgStd(gemImg, [height, height])\n allImgs.append(gemImg)\n result = np.concatenate(allImgs, axis=1)\n return makeSpellImgFinal(result, fullWidth, leftPad, rightPad)\n \ndef makeSpellImgBookTitle(opt, gem, gemShape, fullWidth, leftPad, rightPad):\n gemImg = opt.gemImgs[gem]\n gemImg = cv2.resize(gemImg, dsize=(gemShape, gemShape), interpolation=cv2.INTER_LANCZOS4) #cv2.INTER_AREA\n gemImg = np.clip(gemImg, 0.0, 1.0)\n return makeSpellImgFinal(gemImg, fullWidth, leftPad, rightPad)\n\ndef makeSpellImgFinal(imgBase, fullWidth, leftPad, rightPad):\n if imgBase.shape[1] > fullWidth:\n imgBase = resizeImgStd(imgBase, [imgBase.shape[0], fullWidth])\n\n centerWidth = fullWidth - leftPad - rightPad\n centerPadL = (centerWidth - imgBase.shape[1]) // 2\n centerPadR = centerWidth - imgBase.shape[1] - centerPadL\n result = np.pad(imgBase, ((0, 0), (centerPadL, centerPadR), (0, 0)), constant_values=1.0)\n result = np.pad(result, ((0, 0), (leftPad, rightPad), (0, 0)), constant_values=1.0)\n return result\n\ndef makeSpellbookTitle(opt, spellbookChar):\n lookupDict = {'r': 'Red', 'y' : 'Yellow', 'g': 'Green',\n 'b': 'Blue', 'v' : 'Violet', 'w': 'White'}\n title = 'The ' + lookupDict[spellbookChar] + ' Grimoire'\n\n result = np.ones([opt.pageHeight, opt.pageWidth, 3], dtype=np.float32)\n\n gemStart = int(opt.pageHeight * 0.15)\n gemHeight = int(opt.pageHeight * 0.45)\n\n titleStart = int(opt.pageHeight * 0.65)\n titleHeight = int(opt.pageHeight * 0.2)\n\n leftPad = int(opt.pageWidth * 0.2)\n rightPad = int(opt.pageWidth * 0.1)\n\n gemImg = makeSpellImgBookTitle(opt, spellbookChar, gemHeight, opt.pageWidth, leftPad, rightPad)\n titleImg = util.drawWrappedText(title, opt.bookTitle, opt.pageWidth, titleHeight, leftPad, rightPad)\n \n result[gemStart:gemStart + gemHeight] = gemImg[:]\n result[titleStart:titleStart + titleHeight] = titleImg[:]\n \n return result\n\ndef makeSpellbookTOC(opt, spellbook):\n result = np.ones([opt.pageHeight, opt.pageWidth, 3], dtype=np.float32)\n\n #leftPad = int(opt.pageWidth * 0.2)\n #rightPad = int(opt.pageWidth * 0.1)\n \n tablePadding = 5\n\n columnWidth = int(opt.pageWidth * 0.45)\n entryHeight = int(opt.gemImgSize[0] * 0.8)\n\n columnStartsX = [int(opt.pageWidth * 0.1), int(opt.pageWidth * 0.55)]\n\n spellIdx = 0\n for columnIdx in range(0, 2):\n for rowIdx in range(0, 10):\n if spellIdx >= len(spellbook):\n continue\n\n spell = spellbook[spellIdx]\n spellImg = makeSpellImg(opt, spell, columnWidth, entryHeight, 0, 0)\n\n entryStartY = int(opt.pageHeight * 0.05 + rowIdx * (entryHeight + 
tablePadding))\n columnStartX = columnStartsX[columnIdx]\n \n result[entryStartY:entryStartY + entryHeight, columnStartX:columnStartX + columnWidth] = spellImg[:]\n\n pageIdxWidth = int(0.1 * opt.pageWidth)\n pageStartX = columnStartX + int(0.35 * opt.pageWidth)\n pageImg = util.drawWrappedText(str(spellIdx + 2), opt.fontPage, pageIdxWidth, entryHeight, 0, 0)\n result[entryStartY:entryStartY + entryHeight,pageStartX:pageStartX + pageIdxWidth] = \\\n alphaMask(pageImg[:], result[entryStartY:entryStartY + entryHeight,pageStartX:pageStartX + pageIdxWidth])\n\n spellIdx += 1\n\n result[:, columnStartsX[1] - 10:columnStartsX[1]] = 0.0\n\n anomX = int(0.57 * opt.pageWidth)\n anomY = int(0.4 * opt.pageHeight)\n anomWidth = int(0.4 * opt.pageWidth)\n anomHeight = int(0.6 * opt.pageHeight)\n anomalyText = \"If you try to cast a spell not on this list, you instead trigger an anomaly. \" \\\n \"For the next minute, all casters must walk heel-to-toe and cannot meditate or cast spells.\"\n anomalyImg = util.drawWrappedText(anomalyText, opt.fontAnom, anomWidth, anomHeight, 0, 0)\n result[anomY:anomY + anomHeight,anomX:anomX + anomWidth] = \\\n alphaMask(anomalyImg[:], result[anomY:anomY + anomHeight,anomX:anomX + anomWidth])\n \n return result\n\ndef alphaMask(imgOver, imgUnder):\n imgAlpha = (imgOver[:,:,0:1] + imgOver[:,:,1:2] + imgOver[:,:,2:3]) / 3.0\n return imgUnder * imgAlpha + imgOver * (1.0 - imgAlpha)\n\ndef makeSpellPage(opt, spell, pageIdx):\n result = np.ones([opt.pageHeight, opt.pageWidth, 3], dtype=np.float32)\n\n titleStart = int(opt.pageHeight * 0.05)\n titleHeight = int(opt.pageHeight * 0.2)\n\n spellStart = int(opt.pageHeight * 0.2)\n spellHeight = opt.gemImgSize[0]\n \n descStart = int(opt.pageHeight * 0.37)\n descHeight = int(opt.pageHeight * 0.55)\n \n leftPad = int(opt.pageWidth * 0.2)\n rightPad = int(opt.pageWidth * 0.1)\n\n spellImg = makeSpellImg(opt, spell, opt.pageWidth, opt.gemImgSize[0], leftPad, rightPad)\n titleImg = util.drawWrappedText(spell['name'], opt.fontTitle, opt.pageWidth, titleHeight, leftPad, rightPad)\n descImg = util.drawWrappedText(spell['desc'], opt.fontDesc, opt.pageWidth, descHeight, leftPad, rightPad)\n \n pageIdxWidth = int(0.1 * opt.pageWidth)\n pageIdxHeight = int(0.1 * opt.pageHeight)\n pageIdxStartX = int(0.9 * opt.pageWidth)\n pageIdxStartY = int(0.85 * opt.pageHeight)\n pageImg = util.drawWrappedText(str(pageIdx), opt.fontPage, pageIdxWidth, pageIdxHeight, 0, 0)\n \n #util.printArrayStats(result, 'result')\n #util.printArrayStats(titleImg, 'titleImg')\n\n result[titleStart:titleStart + titleHeight] = titleImg[:]\n result[spellStart:spellStart + spellHeight] = spellImg[:]\n result[descStart:descStart + descHeight] = descImg[:]\n result[pageIdxStartY:pageIdxStartY + pageIdxHeight,pageIdxStartX:pageIdxStartX + pageIdxWidth] = \\\n alphaMask(pageImg[:], result[pageIdxStartY:pageIdxStartY + pageIdxHeight,pageIdxStartX:pageIdxStartX + pageIdxWidth])\n\n return result\n #util.saveNPYImg('spell.png', spellImg)\n #util.saveNPYImg('result.png', result)\n\ndef makeSpellbookImages(opt, spellbookChar, spellbook):\n baseDir = 'spellbooks/' + spellbookChar + '/'\n os.makedirs(baseDir, exist_ok=True)\n\n titleImg = makeSpellbookTitle(opt, spellbookChar)\n TOCImg = makeSpellbookTOC(opt, spellbook)\n\n util.saveNPYImgDouble(baseDir + '0.png', titleImg)\n util.saveNPYImgDouble(baseDir + '1.png', TOCImg)\n\n pageIdx = 2\n for spellIdx in range(0, len(spellbook)):\n spell = spellbook[spellIdx]\n print('saving ' + spellbookChar + ' ' + str(pageIdx))\n img = 
makeSpellPage(opt, spell, spellIdx+2)\n util.saveNPYImgDouble(baseDir + str(pageIdx) + '.png', img)\n pageIdx += 1\n\n pdf = FPDF()\n for idx in range(0, pageIdx):\n pdf.add_page()\n #a4 letter size in mm: 210 x 297 mm\n pdf.image(baseDir + str(idx) + '.png', 0, 0, 210, 297)\n #pdf.output(baseDir + str(spellbookChar) + \".pdf\", \"F\")\n pdf.output('spellbooks/' + str(spellbookChar) + \".pdf\", \"F\")\n\n\ndef convertAll():\n opt = Opt()\n spellList = SpellList('spells.txt')\n\n\n for spellChar in spellList.spellbooks:\n makeSpellbookImages(opt, spellChar, spellList.spellbooks[spellChar])\n\n #makeSpellPage(opt, spellList.spells[0])\n \n #img = util.drawWrappedText('this is a really long string. this is a really long string. this is a really long string. this is a really long string. this is a really long string. this is a really long string. this is a really long string. this is a really long string.', fontTitle, 1024)\n #util.printArrayStats(img, 'img')\n #util.saveNPYImg('debug.png', img)\n\nconvertAll()" }, { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 10, "blob_id": "bfdc9dbbf4afc5c06582c35428ffe61868160ccd", "content_id": "7fc1b8f2ff3fe5291d9e2d0ac1ba05e7bf9e470b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 22, "license_type": "no_license", "max_line_length": 11, "num_lines": 2, "path": "/README.md", "repo_name": "techmatt/Solaris", "src_encoding": "UTF-8", "text": "# Solaris\nA LRS LARP.\n" }, { "alpha_fraction": 0.5690203309059143, "alphanum_fraction": 0.6017811894416809, "avg_line_length": 33.16304397583008, "blob_id": "3b79d9a08d98265657ed8c59e52c7e04147f4f73", "content_id": "da6e2a888c9715f5cb28b4ef999812358090f19a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3144, "license_type": "no_license", "max_line_length": 107, "num_lines": 92, "path": "/util.py", "repo_name": "techmatt/Solaris", "src_encoding": "UTF-8", "text": "\nfrom PIL import Image, ImageFont, ImageDraw\n\nimport cv2\nimport numpy as np\n\ndef loadImg(filename, newSize):\n img = cv2.imread(filename, cv2.IMREAD_COLOR)[:,:,0:3]\n img = cv2.resize(img, dsize=(newSize[1], newSize[0]), interpolation=cv2.INTER_LANCZOS4) #cv2.INTER_AREA\n img = img.astype(np.float32) / 255.0\n img = np.clip(img, 0.0, 1.0)\n return img\n\ndef breakWrappedText(text, font, lineLength):\n words = text.split(' ')\n curLine = ''\n allLines = []\n for word in words:\n if len(curLine) == 0:\n newLine = word\n else:\n newLine = curLine + ' ' + word\n if font.getlength(newLine) >= lineLength:\n allLines.append(curLine)\n curLine = word\n else:\n curLine = newLine\n allLines.append(curLine)\n return allLines\n \n \"\"\"for word in text.split():\n line = f'{lines[-1]} {word}'.strip()\n if font.getlength(text) <= lineLength:\n lines[-1] = line\n else:\n lines.append(word)\n return lines\n #return '\\n'.join(lines)\"\"\"\n\n\"\"\"def drawWrappedText2(image, text, font, text_color, text_start_height):\n draw = ImageDraw.Draw(image)\n image_width, image_height = image.size\n y_text = text_start_height\n lines = textwrap.wrap(text, width=40)\n for line in lines:\n line_width, line_height = font.getsize(line)\n draw.text(((image_width - line_width) / 2, y_text), \n line, font=font, fill=text_color)\n y_text += line_height\"\"\"\n\ndef drawWrappedText(allText, font, fullWidth, fullHeight, leftPad, rightPad):\n width = fullWidth - leftPad - rightPad\n image = Image.new(\"RGBA\", 
(width, fullHeight), (255,255,255,255))\n draw = ImageDraw.Draw(image)\n\n lines = breakWrappedText(allText, font, width)\n yOffset = 0\n for line in lines:\n lineWidth, lineHeightBad = font.getsize(line)\n _, lineHeightGood = font.getsize(line + 'CLgp')\n draw.text(((width - lineWidth) / 2, yOffset), \n line, font=font, fill=(0,0,0,255))\n yOffset += lineHeightGood\n\n #draw.rectangle([(0, 0), (width, height)], fill=(128,128,128,255))\n #draw.text((10, 0), text, (0,0,0,255), font=font)\n #img_resized = image.resize((188,45), Image.ANTIALIAS)\n result = np.array(image)[:,:,0:3] / 255.0\n result = np.pad(result, ((0, 0), (leftPad, rightPad), (0, 0)), constant_values=1.0)\n return result\n\ndef saveNPYImg(filename, img):\n img = (img * 255.0).astype(np.uint8)\n cv2.imwrite(filename, img)\n\ndef saveNPYImgDouble(filename, img):\n img = np.concatenate([img, img], axis=0)\n saveNPYImg(filename, img)\n\ndef linearMap(x, minValIn, maxValIn, minValOut, maxValOut):\n return ((x - minValIn) * (maxValOut - minValOut) / (maxValIn - minValIn) + minValOut)\n\ndef printArrayStats(x, name):\n print(name, x.shape, x.dtype, 'range=[', np.amin(x), ', ', np.amax(x), ']')\n \ndef makeGrayscale(img):\n if len(img.shape) == 2:\n return img\n if img.shape[2] == 1:\n return img[:,:,0]\n imgMax = np.max(img, axis=2)\n imgMin = np.min(img, axis=2)\n return (imgMax + imgMin) / 2.0 # 'L' in HSL\n" } ]
3
bezgodov/python-basics
https://github.com/bezgodov/python-basics
a287ecfaea2ed2e99845a99ba399611142af263f
8f08b56d7bd316b2198f530838ee056664f44dcc
a4ac575c182726954c704b3b0b964e208cd61eca
refs/heads/master
2020-04-12T23:11:56.458792
2019-02-11T12:17:11
2019-02-11T12:17:11
162,811,716
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.5858585834503174, "avg_line_length": 21.11111068725586, "blob_id": "dfca294ee2adc81c38f1f8cdafe0cdfbc6e7c6b4", "content_id": "a4ad2b5837a2230a0df9456afda2ea92d7821a30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/5/M.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nN, M = map(int, input().split())\n\na = np.array(list(map(int, input().split())))\ni = np.array(list(map(int, input().split())))\n\nnp.add.at(a, i - 1, 1)\nprint(' '.join(map(str, a)))" }, { "alpha_fraction": 0.557894766330719, "alphanum_fraction": 0.5789473652839661, "avg_line_length": 95, "blob_id": "c87caca9fb41b50bc2cd47b7a3762cc3aab4b429", "content_id": "b669548e3162f74be15e3b31c0d324ffbacea51b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 95, "num_lines": 1, "path": "/4/F.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "print(' '.join(map(str, [i ** 2 for i in reversed(list(map(int, input().split()))) if i > 0])))" }, { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.5283018946647644, "avg_line_length": 25.75, "blob_id": "69aa52b3f807a53fb58e6417a45a069f3bbfb7fe", "content_id": "61441f9b6aa44dbb54891d172d8e24c99bc737e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 106, "license_type": "no_license", "max_line_length": 36, "num_lines": 4, "path": "/2/H.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n, k = map(int, input().split())\n\na = list(map(str, input().split()))\nprint(' '.join(list(a[k:] + a[:k])))" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6761904954910278, "avg_line_length": 25.5, "blob_id": "3350e6618ff4ae1d06ea41221d93f25f7ac077dc", "content_id": "78df27c68c9f4c9674d6930eb6d9dcc2cee1727f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/5/A.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "from numpy import linalg as LA\n\nvals = list(map(int, input().split()))\nprint(int(LA.norm(vals, ord = 1)))" }, { "alpha_fraction": 0.6788991093635559, "alphanum_fraction": 0.6880733966827393, "avg_line_length": 26.5, "blob_id": "4bbb453309d2aa904d31950cd3de2aae1e407c52", "content_id": "451fa60735721d7c7f0f7d341d9bec3ccd7d403d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 109, "license_type": "no_license", "max_line_length": 40, "num_lines": 4, "path": "/5/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "from numpy import linalg as LA\n\nvals = list(map(float, input().split()))\nprint(float(LA.norm(vals, ord = 2)))" }, { "alpha_fraction": 0.5100864768028259, "alphanum_fraction": 0.5100864768028259, "avg_line_length": 20.6875, "blob_id": "c4f7dae43f93f0279a598ba5dce9a98a21cd0c58", "content_id": "72b01cd43c6cc464ae7f7db8cbf0b9557413a261", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/7/E.py", 
"repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import re\n\nclass Student:\n def __init__(self, name=''):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n @name.setter\n def name(self, name):\n if (type(name) == str) and (re.match(\"^[A-Za-z ]+$\", name)):\n self._name = name\n else:\n raise ValueError(\"Wrong value\")\n" }, { "alpha_fraction": 0.6160714030265808, "alphanum_fraction": 0.6160714030265808, "avg_line_length": 31.14285659790039, "blob_id": "dad3cc2651130afa91a2f5a2fab5f2ec17ca650c", "content_id": "10f4cb7fe6f0b79c84621dfcd20d29b0935ff536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 224, "license_type": "no_license", "max_line_length": 51, "num_lines": 7, "path": "/6/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "class Hello_world:\n def __init__(self):\n self.hello = 'Hello world!'\n def get_hello_world(self):\n return self.hello\n def set_new_hello_world(self, new_hello_world):\n self.hello = new_hello_world" }, { "alpha_fraction": 0.5421686768531799, "alphanum_fraction": 0.5542168617248535, "avg_line_length": 11, "blob_id": "b25175ea39136bee3edb4549be4a39181bc8e741", "content_id": "fa0a358df508abe8f5a593a0f98797dab111758f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/2/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\na = []\nfor i in range(n):\n\ta.append(input())\na.sort()\nprint(a[0])" }, { "alpha_fraction": 0.5645161271095276, "alphanum_fraction": 0.5752688050270081, "avg_line_length": 19.77777862548828, "blob_id": "424bf79f964c9c8f262ba8560e5e78652a07b1fb", "content_id": "b843bcffc958747bb6c17156918959e70f06c1a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 45, "num_lines": 9, "path": "/5/E.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\nN, M = list(map(int, input().split()))\n\na = []\nfor _ in range(N):\n a.append(list(map(int, input().split())))\n\na = np.transpose(a)\nprint(np.dot(a[0], a[len(a) - 1]))" }, { "alpha_fraction": 0.591160237789154, "alphanum_fraction": 0.591160237789154, "avg_line_length": 15.454545021057129, "blob_id": "ba78a8d1803c9922bd5ffa4554803ec826ff731b", "content_id": "f018b48630891a543ca73909c9a1e0816620adb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 181, "license_type": "no_license", "max_line_length": 47, "num_lines": 11, "path": "/5/L.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nN = int(input())\n\nmtx = []\n\nfor _ in range(N):\n mtx.append(list(map(int, input().split())))\n\nfor row in np.transpose(mtx):\n print(' '.join(map(str, row)))\n" }, { "alpha_fraction": 0.5368170738220215, "alphanum_fraction": 0.559382438659668, "avg_line_length": 27.066667556762695, "blob_id": "0cef7e7317d2041f2320a344ea53c442f7d210ce", "content_id": "7b98a405dba923a85884feeaed041bc00fa93255", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 842, "license_type": "no_license", "max_line_length": 72, "num_lines": 30, "path": "/7/A.py", "repo_name": "bezgodov/python-basics", "src_encoding": 
"UTF-8", "text": "import time\nimport random\n\ndef time_decorator(decorated_func):\n def helper(*xs, **kws):\n t0 = time.time()\n print('Start time:', t0)\n decorated_func(*xs)\n t1 = time.time()\n print('End time:', t1)\n print('Execution time:', str(t1 - t0))\n return helper\n\ndef counter_decorator(decorated_func):\n def helper(*xs, **kws):\n helper.calls += 1\n print('Function calls count:', helper.calls)\n return decorated_func(*xs, **kws)\n helper.calls = 0\n return helper\n\ndef method_decorator(decorated_method):\n def helper(**kws):\n print(kws)\n def count(arg, x, y, z):\n coefs = [3, 7, 15, 18]\n return x * coefs[0] - y * coefs[1] + z * coefs[2] + coefs[3]\n print(xs, kws)\n return decorated_method(xs[0], count(*xs))\n return helper\n" }, { "alpha_fraction": 0.5409556031227112, "alphanum_fraction": 0.5546075105667114, "avg_line_length": 16.235294342041016, "blob_id": "2d7b5c0b5ee375f4039d98d1712e94f91fd2805a", "content_id": "8020cde660a151611d7c67bcab553eb4d693b141", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 50, "num_lines": 34, "path": "/1/N.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nscript_location = Path(__file__).absolute().parent\nfile_location = script_location / 'input.txt'\ninput_file = file_location.open()\n\n# input_file=open(\"input.txt\", \"r\")\n\nA, B, t = map(int, input_file.readline().split())\n\ninput_file.close\n\nside = [\n \"S\",\n \"E\",\n \"N\",\n \"W\",\n]\n\nkoef = (t % ((A + B) * 2))\n\ni = 0\nsumm = 0\nwhile summ <= koef:\n if i % 2 == 0:\n summ += A\n else:\n summ += B\n i += 1\n\nprint(side[(i - 1) % len(side)])\noutput_file = open('output.txt', 'w')\noutput_file.write(side[(i - 1) % len(side)])\noutput_file.close\n" }, { "alpha_fraction": 0.5887850522994995, "alphanum_fraction": 0.5887850522994995, "avg_line_length": 14.428571701049805, "blob_id": "c9fab86e12f7760dec2252b1775c5e211d18cccf", "content_id": "9e57101509657d171967a971c4df077e8bd27cf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 107, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/2/J.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "a = list(map(int, input().split()))\n\nres = []\nfor i in a:\n\tif i not in res:\n\t\tres.append(i)\nprint(len(res))" }, { "alpha_fraction": 0.5828571319580078, "alphanum_fraction": 0.6057142615318298, "avg_line_length": 20.875, "blob_id": "df286a4d59f58014bb0cddbf1607d3d8dbaefd3f", "content_id": "a538336682adb491f2111649a11dd2adc878e558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 175, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/5/K.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nN = int(input())\n\na1 = np.array(list(map(int, input().split())))\na2 = np.array(list(map(int, input().split())))\n\nprint(' '.join(map(str, np.add(a1, a2))))\n" }, { "alpha_fraction": 0.578635036945343, "alphanum_fraction": 0.580118715763092, "avg_line_length": 26, "blob_id": "67c94cf9aabf948e3a2ca48138e0149bea157c8a", "content_id": "da0a2c6a0e1218a23425489807fba975978fdee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 57, 
"num_lines": 25, "path": "/1/M.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\ninput_file = open(\"input.txt\", \"r\")\n\nD, T = map(float, input_file.readline().split())\nv = tuple(map(float, input_file.readline().split()))\nw = tuple(map(float, input_file.readline().split()))\n\np = 0\nfor i in range(len(v)):\n for j in range(len(w)):\n if v[i] != w[j]:\n cur_p = (D - w[j] * T) / (v[i] - w[j]) * v[i]\n if cur_p > p and cur_p <= D:\n p = cur_p\n\ninput_file.close\n\noutput_file = open('output.txt', 'w')\noutput_file.write(str(float(p)))\noutput_file.close" }, { "alpha_fraction": 0.5210526585578918, "alphanum_fraction": 0.5263158082962036, "avg_line_length": 18.100000381469727, "blob_id": "c698caf3707deb4435d78b8fae930e2559288839", "content_id": "b01638850c4ac533e1b0666f6a0c1155b4e6d71e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 48, "num_lines": 10, "path": "/3/G.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\na = []\nfor i in range(n):\n\ta.append(list(map(int, input().split())))\n\nfor j in range(0, n):\n\tfor k in a:\n\t\tprint(k[j], sep = '', end = ' ', flush = True)\n\tprint(end = \"\\n\")" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5057471394538879, "avg_line_length": 23.85714340209961, "blob_id": "2ceeed4d6790ca9cad9c22d6a910f11f6bac1142", "content_id": "d14f86a43ead572e46675869788cc8c3559b4921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 34, "num_lines": 7, "path": "/4/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "def PrintMatrix(mat):\n\tfor i in mat:\n\t\tfor index, j in enumerate(i):\n\t\t\tif index != len(i) - 1:\n\t\t\t\tprint(j, sep = '', end = \" \")\n\t\t\telse:\n\t\t\t\tprint(j, sep = '', end = \"\\n\")\n" }, { "alpha_fraction": 0.6413043737411499, "alphanum_fraction": 0.6503623127937317, "avg_line_length": 19.44444465637207, "blob_id": "b84bd4329b09c9adfd07428b7634d9821787adb1", "content_id": "be393eeb3c99b985c780eec1e8afa97a37a3700f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 54, "num_lines": 27, "path": "/2/L.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\n\nimport os\nfld = os.path.dirname(os.path.abspath(__file__))\ninput_file = open(os.path.join(fld, 'input.txt'), 'r')\n\n# input_file = open('input.txt', \"r\")\n\nres = [0] * 10\n\nn = int(input_file.readline())\n\nfor i in range(n):\n\tres[(int(input_file.readline()) - 1)] += 1\n\ninput_file.close\n\noutput_file = open('output.txt', 'w')\n\noutput_file.write(' '.join(map(str, res)))\n\noutput_file.close\n" }, { "alpha_fraction": 0.4195402264595032, "alphanum_fraction": 0.4454022943973541, "avg_line_length": 19.52941131591797, "blob_id": "7a8580d8ab357c63df491ee2a7c4afdea717db1a", "content_id": "956e4910319057c9e48ac4378de42ae4606bc937", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 43, "num_lines": 17, "path": "/7/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "class king_lion():\n def __init__(self):\n self._x = 47\n\n def get_x (self):\n return self._x\n\n def del_x (self):\n pass\n \n def set_x(self, value):\n if type(value) == int:\n self._x = value\n if value < -100 or value > 100:\n self._x = 0\n\n x = property(get_x, set_x, del_x)" }, { "alpha_fraction": 0.5053763389587402, "alphanum_fraction": 0.5053763389587402, "avg_line_length": 17.700000762939453, "blob_id": "40458561bf7f4143120e5720b0aa32b4030e245d", "content_id": "f216bd7b1008b8af87a602426c5935ee7bad12be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 45, "num_lines": 10, "path": "/5/I.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "N, M, I, J = map(int, input().split())\n\na = []\nfor _ in range(N):\n a.append(list(map(int, input().split())))\n\na[I], a[J] = a[J], a[I]\n\nfor row in a:\n print(' '.join(map(str, row)))" }, { "alpha_fraction": 0.5574712753295898, "alphanum_fraction": 0.568965494632721, "avg_line_length": 18.44444465637207, "blob_id": "e332b51267f1c35b5c0399b5cc2b7faaba8777d7", "content_id": "1bf9981afa1b870a268ed80e0ede3c992b8ce3d2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/3/K.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n, m = map(int, input().split())\n\na = list(map(int, input().split()))\nindexes = list(map(int, input().split()))\n\nfor i in indexes:\n\ta[i - 1] += 1\n\nprint(' '.join(map(str,a)))" }, { "alpha_fraction": 0.6304348111152649, "alphanum_fraction": 0.6304348111152649, "avg_line_length": 22.5, "blob_id": "223a3f920260f7bfa4139ae8d01af71ecbead362", "content_id": "90c6e3569122c5c9859206f913e3af987ff42211", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/4/E.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "def Map(func, l):\n\treturn [func(i) for i in l]" }, { "alpha_fraction": 0.5431034564971924, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 13.625, "blob_id": "f71adc4e8011512171a5589231342b2a5894c01e", "content_id": "e2f4ec382984ac76dd0bfbd912d491b25b2a583d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/3/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "s = input().lower().replace(' ', '')\n\nreversed_s = s[::-1]\n\nif s == reversed_s:\n\tprint('True')\nelse:\n\tprint('False')" }, { "alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.6022727489471436, "avg_line_length": 16.799999237060547, "blob_id": "d22c016280f88605f3651142d737ea6b7570d3eb", "content_id": "1c0ea60db04cfc313d905df95e0bfca04a6e4bca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 27, "num_lines": 5, "path": "/1/C.py", "repo_name": "bezgodov/python-basics", 
"src_encoding": "UTF-8", "text": "name = input()\nsurname = input()\n\nprint(\"Меня зовут \" + surname)\nprint(name + \" \" + surname)" }, { "alpha_fraction": 0.6326530575752258, "alphanum_fraction": 0.6326530575752258, "avg_line_length": 21.161291122436523, "blob_id": "bf269494744674d105c23dc8735f253848497855", "content_id": "78fc9e4ac9e3fb5a3f432193f428d07f723d2a60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 686, "license_type": "no_license", "max_line_length": 55, "num_lines": 31, "path": "/7/C.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import re\n# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\ninput_file = open(\"input.txt\", \"r\")\n\nn = int(input_file.readline())\nvalues = dict()\n\nfor _ in range(n):\n val = input_file.readline()\n group, student = re.split(r'\\t+', val.rstrip('\\n'))\n \n if group not in values:\n values[group] = []\n \n values[group].append(student)\n\ninput_file.close\n\noutput_file = open('output.txt', 'w')\n\nfor key in sorted(values):\n output_file.write(key + '\\n')\n for student in sorted(values[key]):\n output_file.write(student + '\\n')\n\noutput_file.close" }, { "alpha_fraction": 0.5970873832702637, "alphanum_fraction": 0.598705530166626, "avg_line_length": 19.633333206176758, "blob_id": "4b558ab4e93da70a34a6d6c6ae8d56965511478d", "content_id": "59155b75ed072a2802fcaaf6dc56379029615e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 86, "num_lines": 30, "path": "/2/M.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import re\n# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\ninput_file = open('d.in', \"r\")\n\ncontent = input_file.readline().replace('-', '')\ndisabled_chars = ['\\n', '.', ',', '!', '?']\n\nfor char in disabled_chars:\n\tcontent = content.replace(char, ' ')\n\ninput_file.close\n\noutput_file = open('d.out', 'w')\noutput_file.write(\n# print(\n\tstr(\n\t\tlen(\n\t\t\t[\n\t\t\t\tw for w in content.split(' ')\n\t\t\t\t\tif ((w not in disabled_chars) and (re.match('^[A-Za-z-]*$', w)) and (len(w) > 0))\n\t\t\t]\n\t\t)\n\t)\n)\noutput_file.close" }, { "alpha_fraction": 0.563829779624939, "alphanum_fraction": 0.563829779624939, "avg_line_length": 12.571428298950195, "blob_id": "b90246679e4825102b5fabe2c73eac35532c4b79", "content_id": "49f0c59ef27c60a67b5c2474949f29a6ccf96268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 94, "license_type": "no_license", "max_line_length": 19, "num_lines": 7, "path": "/2/F.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\na = []\nfor i in range(n):\n\ta.append(input())\na.reverse()\nprint('\\n'.join(a))" }, { "alpha_fraction": 0.5892857313156128, "alphanum_fraction": 0.5892857313156128, "avg_line_length": 21.600000381469727, "blob_id": "c0f37d4b2471611e1effd98358e835c9c757f565", "content_id": "509fefc9b446dd097d98f4a2623618fb40f1b71b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 58, "num_lines": 5, "path": 
"/1/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "surname = input()\nname = input()\nsubdomain = input()\n\nprint(surname + \".\" + name + \"@\" + subdomain + \".dvfu.ru\")" }, { "alpha_fraction": 0.5290322303771973, "alphanum_fraction": 0.5419355034828186, "avg_line_length": 21.285715103149414, "blob_id": "398d77d695e87e5ce7b95a93117733641c5e455c", "content_id": "e1425433d58dcc094d89145957ede7af0f2eea17", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 48, "num_lines": 7, "path": "/4/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "a = list(map(str, input().split()))\n\nb = {i[1]: i for i in a}\n\nprint(' '.join([b[i] for i in sorted(b)]))\n\n# print(' '.join(sorted(a, key=lambda x: x[1])))" }, { "alpha_fraction": 0.582524299621582, "alphanum_fraction": 0.582524299621582, "avg_line_length": 14.5, "blob_id": "88c34857a93a3aea665b1a2d2358ecb2eae48008", "content_id": "96d77dda78fbe2e53fbc95e7903e8ff59c4199ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 309, "license_type": "no_license", "max_line_length": 35, "num_lines": 20, "path": "/3/C.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import sys\na = list(map(int, input().split()))\n\na_min = sys.maxsize\na_max = -sys.maxsize\nindexes = []\n\nfor index, i in enumerate(a):\n\tif i >= a_max:\n\t\tif i > a_max:\n\t\t\tdel(indexes[:])\n\t\tindexes.append(index)\n\t\ta_max = i\n\tif i < a_min:\n\t\ta_min = i\n\nfor i in indexes:\n\ta[i] = a_min\n\nprint(' '.join(map(str, a)))" }, { "alpha_fraction": 0.4480000138282776, "alphanum_fraction": 0.4480000138282776, "avg_line_length": 9.5, "blob_id": "a5a63149d3f9f71e002bedba649e1bf7ff2cb049", "content_id": "1265dd71b0ae8a1cd591cb10c60f8ee5652b50a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 131, "license_type": "no_license", "max_line_length": 16, "num_lines": 12, "path": "/1/G.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "reply = input()\n\narr = [\n \"Просто!\",\n \"Easy!\",\n \"Einfach!\"\n]\n\nif reply in arr:\n print(\":)\")\nelse:\n print(\":(\")" }, { "alpha_fraction": 0.6890756487846375, "alphanum_fraction": 0.6890756487846375, "avg_line_length": 25.55555534362793, "blob_id": "b0a2a6339257c23e27876b0538f8dc093fa55d35", "content_id": "623c77dbfada0c942e4662fc2ad71a633173af65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/6/F.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "allProperties = dir(eval(input())())\n\npublic = [val for val in allProperties if not val.startswith('_')]\nprivate = [val for val in allProperties if val.startswith('_')]\n\nfor val in public:\n print(val)\nfor val in private:\n print(val)" }, { "alpha_fraction": 0.6106194853782654, "alphanum_fraction": 0.6106194853782654, "avg_line_length": 21.799999237060547, "blob_id": "720e969e2bc6abd505121b66a73876cb7356978e", "content_id": "fcac1c232e422d2a6987de8610f1be3c7a3d5580", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 33, "num_lines": 5, "path": "/4/I.py", "repo_name": 
"bezgodov/python-basics", "src_encoding": "UTF-8", "text": "def Join(array, separator = ' '):\n\tres = ''\n\tfor i in array:\n\t\tres += i + separator\n\treturn res[:-len(separator)]" }, { "alpha_fraction": 0.5290697813034058, "alphanum_fraction": 0.5465116500854492, "avg_line_length": 16.299999237060547, "blob_id": "399af9cc56ed91dd5c066bd2a27bb81a055db3cc", "content_id": "542f8081851d1a1f284c67289ee6a740ab702512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 172, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/2/I.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\na = []\nfor i in range(2):\n\ta.append(list(map(int, input().split())))\n\nres = []\nfor i in range(n):\n\tres.append(str(a[0][i] + a[1][i]))\nprint(' '.join(res))" }, { "alpha_fraction": 0.47155362367630005, "alphanum_fraction": 0.4770240783691406, "avg_line_length": 30.55172348022461, "blob_id": "275ea765e1f5255ed18afe2b5f2002f614ab86a0", "content_id": "01c699a828332bcf1edcf708acb2c8c48e232019", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 914, "license_type": "no_license", "max_line_length": 51, "num_lines": 29, "path": "/6/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "class ShiftableList(list):\n # def __init__(self, list):\n # super().__init__(list)\n # self.list = list\n def checkInt(self, val):\n if type(val) != int:\n raise TypeError('Wrong input value')\n def shift(self, shift, dir = 'left'):\n shf = shift % self.__len__()\n lst = self.copy()\n for _ in range(shf):\n if dir == 'left':\n lst.append(lst.pop(0))\n else:\n lst.insert(0, lst.pop(-1))\n return lst\n def __rshift__(self, shift):\n self.checkInt(shift)\n if shift < 0:\n return self.__lshift__(abs(shift))\n else:\n return self.shift(shift, dir = 'right')\n \n def __lshift__(self, shift):\n self.checkInt(shift)\n if shift < 0:\n return self.__rshift__(abs(shift))\n else:\n return self.shift(shift, dir = 'left')" }, { "alpha_fraction": 0.5542168617248535, "alphanum_fraction": 0.5662650465965271, "avg_line_length": 11, "blob_id": "f50e915927290ed5bd6ac1e6e8d94bd6e33b10f8", "content_id": "846e7c6b30a1fad2e5c6870fe5790dd66fbc6c3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/3/F.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "a = list(map(int, input().split()))\n\nres = 0\nfor i in a:\n\tres += abs(i)\n\nprint(res)" }, { "alpha_fraction": 0.630797803401947, "alphanum_fraction": 0.6456400752067566, "avg_line_length": 23.545454025268555, "blob_id": "a8c050bef99e40f8746f09b931b17fa21c7dc3f3", "content_id": "9eb901c464a9a77f9bd2a02d29ba3b68227fe6d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 539, "license_type": "no_license", "max_line_length": 55, "num_lines": 22, "path": "/1/L.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\ninput_file=open(\"input.txt\", \"r\")\n\nH_out, M_out = map(int, input_file.readline().split())\nH_way, M_way = map(int, 
input_file.readline().split())\n\ninput_file.close\n\noutput_file = open('output.txt', 'w')\n\nhours = (H_out + H_way) % 24\nminutes = (M_out + M_way) % 60\n\nhours += int((M_out + M_way) / 60)\n\noutput_file.write(str(hours % 24) + \" \" + str(minutes))\noutput_file.close" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.44285714626312256, "avg_line_length": 19.14285659790039, "blob_id": "4955571d574bb9b8a605e46c2bc63e4d1641da52", "content_id": "912c4e8f46a0b84b91b97a44892a181972415de1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 144, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/2/D.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\nfor i in reversed(range(n + 1)):\n\tif i == 0:\n\t\tprint('Пуск', '!' * n, sep = '')\n\telse:\n\t\tprint(i, '!' * (n - i), sep = '')" }, { "alpha_fraction": 0.4804469347000122, "alphanum_fraction": 0.5251396894454956, "avg_line_length": 17, "blob_id": "85dcf81580d095fb2653becdc9da7d4aedf208ea", "content_id": "356c49d112b124c0dc0f2d03fd5885c119d9106c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 179, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/1/I.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "reply = int(input())\n\nif reply % 3 == 0 and reply % 5 == 0:\n print(\"FizzBuzz\")\nelif reply % 3 == 0:\n print(\"Fizz\")\nelif reply % 5 == 0:\n print(\"Buzz\")\nelse:\n print(\"\")" }, { "alpha_fraction": 0.5840708017349243, "alphanum_fraction": 0.6106194853782654, "avg_line_length": 13.25, "blob_id": "30b6390ff4d049ffcf66b057dddc4b9153e3bc18", "content_id": "4d79e4d403304c6c0af4cd8ea6b3e2a19338988d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 37, "num_lines": 8, "path": "/3/H.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import math\na = list(map(float, input().split()))\n\nres = 0.0\nfor i in a:\n\tres += pow(i, 2)\n\nprint(math.sqrt(res))" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6222222447395325, "avg_line_length": 10.375, "blob_id": "18e87d8c00fe29e8b314a37cfd5f95c3c6751f55", "content_id": "2b5aaac5fc386504a003a8997ac13152075456a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 25, "num_lines": 8, "path": "/1/J.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "names = []\n\nfor name in range(3):\n names.append(input())\n\nnames.sort()\n\nprint(names[0])" }, { "alpha_fraction": 0.5839999914169312, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 14.75, "blob_id": "e6166ca8e4ddb73a32d37772ccf9352993169b92", "content_id": "1d86c2c5fb6ed9c55e078289aec1c3212a8e575f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 125, "license_type": "no_license", "max_line_length": 27, "num_lines": 8, "path": "/1/E.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "x = int(input())\ny = int(input())\nt = int(input())\n\nlitersIn = y * t\nlitersOut = int(t / 10) * x\n\nprint(litersIn - litersOut)" }, { "alpha_fraction": 0.5687623023986816, "alphanum_fraction": 0.5943025350570679, 
"avg_line_length": 38.94117736816406, "blob_id": "80630cf4522f6714d882a94c9b95a3bb798bbba7", "content_id": "ce0b45d8fe1a53ea723c8f5200d858ec92d25009", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2387, "license_type": "no_license", "max_line_length": 162, "num_lines": 51, "path": "/6/C.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "class Time:\n # Конструктор, принимающий четыре целых числа: часы, минуты, секунды и миллисекунды.\n # В случае, если передан отрицательный параметр, вызвать исключение ValueError.\n # После конструирования, значения параметров времени должны быть корректными:\n # 0 <= GetHour() <= 23\n # 0 <= GetMinute() <= 59\n # 0 <= GetSecond() <= 59\n # 0 <= GetMillisecond() <= 999\n def __init__(self, hours=0, minutes=0, seconds=0, milliseconds=0):\n if (hours < 0) or (minutes < 0) or (seconds < 0) or (milliseconds < 0):\n raise ValueError('Wrong init')\n else:\n self.hours = hours\n self.minutes = minutes\n self.seconds = seconds\n self.milliseconds = milliseconds\n def GetHour(self):\n return self.hours\n def GetMinute(self):\n return self.minutes\n def GetSecond(self):\n return self.seconds\n def GetMillisecond(self):\n return self.milliseconds\n # Прибавляет указанное количество времени к текущему объекту.\n # После выполнения этой операции параметры времени должны остаться корректными.\n def Add(self, time):\n self.milliseconds += time.GetMillisecond()\n seconds = 0\n if self.milliseconds > 999:\n seconds = int(self.milliseconds / 1000)\n self.milliseconds %= 1000\n \n self.seconds += time.GetSecond() + seconds\n minutes = 0\n if self.seconds > 59:\n minutes = int(self.seconds / 60)\n self.seconds %= 60\n\n self.minutes += time.GetMinute() + minutes\n hours = 0\n if self.minutes > 59:\n hours = int(self.minutes / 60)\n self.minutes %= 60\n \n self.hours += time.GetHour() + hours\n self.hours %= 24\n # Операторы str и repr должны представлять время в формате\n # HH:MM:SS.ms\n def __str__(self):\n return (str(self.GetHour())).zfill(2) + ':' + (str(self.GetMinute())).zfill(2) + ':' + (str(self.GetSecond()).zfill(2)) + '.' 
+ str(self.GetMillisecond())" }, { "alpha_fraction": 0.5444444417953491, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 10.375, "blob_id": "75d8cf70ee1e55262d45515a0851e2451f17748d", "content_id": "f42fea879d3bf28bf8dc2813c89ffce178265998", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 90, "license_type": "no_license", "max_line_length": 20, "num_lines": 8, "path": "/3/A.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "money = 1000\nn = int(input())\n\nres = 0\nwhile res < money:\n\tres += int(input())\n\nprint(res)" }, { "alpha_fraction": 0.6916666626930237, "alphanum_fraction": 0.6916666626930237, "avg_line_length": 23.100000381469727, "blob_id": "c2eb29c30d3206292f691ddb5e7c6f79b7bae828", "content_id": "1a69a95161a3f0d85271c5ae0e4db5e590e2aea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 66, "num_lines": 10, "path": "/6/A.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import re\nallProperties = (dir(str()))\n\npublic = [val for val in allProperties if not val.startswith('_')]\nprivate = [val for val in allProperties if val.startswith('_')]\n\nfor val in public:\n print(val)\nfor val in private:\n print(val)" }, { "alpha_fraction": 0.5840708017349243, "alphanum_fraction": 0.6017699241638184, "avg_line_length": 15.285714149475098, "blob_id": "aee069c5e9e0c9dea27d578705a432fd60ae1c56", "content_id": "12e199c841f8818391a8e53049dfebaf51bb0d58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 113, "license_type": "no_license", "max_line_length": 40, "num_lines": 7, "path": "/2/C.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "n = int(input())\n\ncount = 0\nfor i in range(n):\n\tif input().lower() == 'boletus edulis':\n\t\tcount += 1\nprint(count)" }, { "alpha_fraction": 0.6210191249847412, "alphanum_fraction": 0.6210191249847412, "avg_line_length": 17.52941131591797, "blob_id": "e7f62ee9753d3c9974a4684c36d3a72f37d39f0b", "content_id": "379e0c84d179b27a40eb818420083f20237ad5fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 41, "num_lines": 17, "path": "/1/H.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "reply = input()\n\ndifficult = \"\"\nif \",\" in reply:\n difficult = \"сложное \"\n\nres = \"\"\nif \"!\" in reply and \"?\" in reply:\n res = \"вопросительно-восклицательное\"\nelif \"!\" in reply:\n res = \"восклицательное\"\nelif \"?\" in reply:\n res = \"вопросительное\"\nelse:\n res = \"повествовательное\"\n\nprint(difficult + res)" }, { "alpha_fraction": 0.49659863114356995, "alphanum_fraction": 0.49659863114356995, "avg_line_length": 11.333333015441895, "blob_id": "e9b78477556048e2691c24c7e6248458bba13b0d", "content_id": "eb9fcc6b13650f7feb86bc2b2e8780eef9de0044", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 198, "license_type": "no_license", "max_line_length": 16, "num_lines": 12, "path": "/1/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "arr = [\n \"Красный\",\n \"Оранжевый\",\n \"Желтый\",\n \"Зеленый\",\n \"Голубой\",\n \"Синий\",\n \"Фиолетовый\"\n]\n\nfor val in arr:\n print(val)" }, { "alpha_fraction": 
0.5319148898124695, "alphanum_fraction": 0.5319148898124695, "avg_line_length": 11, "blob_id": "4a997b075ac159e16c94c288ac3e1a2de607a67e", "content_id": "f900d5b70dccb2a056b94648f20130d6aed7c7c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 16, "num_lines": 4, "path": "/1/F.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "M = int(input())\nN = int(input())\n\nprint(M / N)" }, { "alpha_fraction": 0.6008583903312683, "alphanum_fraction": 0.6287553906440735, "avg_line_length": 21.238094329833984, "blob_id": "b234e5de60e6d31822aaae26ac300a62fc5ef6c6", "content_id": "bd54a23d3a5195063c5359c8a8280690eb8f7109", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 466, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/1/K.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.txt'\n# input_file = file_location.open()\n\ninput_file = open(\"input.txt\", \"r\")\ncontent = input_file.read()\n\nA, B = map(int, content.split())\n\ninput_file.close\n\noutput_file = open('output.txt', 'w')\n\nif ((A + B - 1) % 4 != 0) or (A < 1) or (B > 1000000):\n output_file.write(\"0\")\nelse:\n output_file.write(str(A + B - 1))\n\noutput_file.close" }, { "alpha_fraction": 0.5308057069778442, "alphanum_fraction": 0.5592417120933533, "avg_line_length": 18.272727966308594, "blob_id": "ff96621afa35afb10ebccb59197a3e8c071abba2", "content_id": "ceffa11c8697ce776a60a5af056899e96239c6fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 211, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/3/I.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "a = list(map(int, input().split()))\n\na_copy = a.copy()\nshift = 0\nfor index, el in enumerate(a):\n\tif el == 0:\n\t\tif index - 1 >= 0:\n\t\t\tdel a_copy[index - 1 - shift]\n\t\t\tshift += 1\n\nprint(' '.join(map(str, a_copy)))" }, { "alpha_fraction": 0.5428571701049805, "alphanum_fraction": 0.5428571701049805, "avg_line_length": 16.75, "blob_id": "3f3bf21a4e09529939c911b9f5a80fc3b81a315f", "content_id": "9a25c94c05c5be64a28fa31591c3d92d4aa671b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 70, "license_type": "no_license", "max_line_length": 21, "num_lines": 4, "path": "/7/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "def nth_element(n):\n def f(lst):\n return lst[n]\n return f" }, { "alpha_fraction": 0.5290322303771973, "alphanum_fraction": 0.5548387169837952, "avg_line_length": 14.600000381469727, "blob_id": "fff757bec9288b5394dbe75bc0927eed60df3332", "content_id": "7418f55106b4891a15e298fd794acd269184c04b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 155, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/2/N.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "res = []\nwhile True:\n\ttry:\n\t\tval = input()\n\texcept EOFError:\n\t\tbreak\n\telse:\n\t\tres.append(val)\nprint(', '.join(res[0:][::2]))\nprint(', '.join(res[1:][::2]))" }, { "alpha_fraction": 0.5769230723381042, 
"alphanum_fraction": 0.5769230723381042, "avg_line_length": 16.66666603088379, "blob_id": "723fffe7e9a6cfac20ee2f258e486559cd168852", "content_id": "233c5438b335ef216ac325b228641f999a5dd336", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 25, "num_lines": 3, "path": "/2/A.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "while input() != 'Stop!':\n\tprint('Каша')\nprint('Ок')" }, { "alpha_fraction": 0.4423076808452606, "alphanum_fraction": 0.4711538553237915, "avg_line_length": 16.5, "blob_id": "c76345b1b189c44fad0c024e205eb0acebd5ceb6", "content_id": "fc72a7be05c4705b74d4424fbf7b0a7a10ee71ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/3/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "s = input()\nn = int(input())\n\na = s.split(' ')\ni = n - 1 if (n - 1 < len(a)) else len(a) - 1\nprint(a[i])" }, { "alpha_fraction": 0.49253731966018677, "alphanum_fraction": 0.5024875402450562, "avg_line_length": 14.538461685180664, "blob_id": "28312c14c6bb7e2f74d4564e8982c16925859102", "content_id": "346d5391887e39f96b40a909aea204e4d236f78b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 201, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/4/C.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "a, b, c = map(int, input().split())\n\ndef gcd(m, n):\n\tif m == 0:\n\t\treturn n\n\tif n == 0:\n\t\treturn m\n\tif m > n:\n\t\treturn gcd(m - n, n)\n\telse:\n\t\treturn gcd(m, n - m)\n\nprint(gcd(a, b), gcd(a, c), gcd(b, c))" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5634920597076416, "avg_line_length": 24.399999618530273, "blob_id": "a7bb71d87f7c5bc7fef9f1d1c12f630783b38926", "content_id": "938fcccdd39881f09e372ecfeabcad13c2e3fca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 61, "num_lines": 5, "path": "/5/B.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nS, d, N = list(map(float, input().split()))\n\nprint(' '.join(map(str, np.linspace(S, S + d * (N - 1), N))))" }, { "alpha_fraction": 0.6570680737495422, "alphanum_fraction": 0.6649214625358582, "avg_line_length": 30.91666603088379, "blob_id": "1b439793fcb54ab3e852ca02785e6a827cd43496", "content_id": "1372b84de70a5cd926ee3601ef3aaf34af3e8fa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 382, "license_type": "no_license", "max_line_length": 90, "num_lines": 12, "path": "/5/J.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\n# from pathlib import Path\n\n# script_location = Path(__file__).absolute().parent\n# file_location = script_location / 'input.csv'\n# input_file = file_location.open()\n\nfile_location = 'input.csv'\n\ndf = pd.read_csv(file_location, sep = ',', header = None)\nprint('\\n'.join([df.values[i][0] for i in (np.where(df == np.amax(df.values[:,1:]))[0])]))" }, { "alpha_fraction": 0.6523835062980652, "alphanum_fraction": 0.6555972099304199, "avg_line_length": 38.74468231201172, "blob_id": 
"07b3fd7bcee56e83a2f54ddde8b5646299d2ca51", "content_id": "f720df591b8fdb70ad075a8bfb77b52597e332ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1867, "license_type": "no_license", "max_line_length": 70, "num_lines": 47, "path": "/6/E.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import random\n\nclass Chromosome:\n # Chromosome class constructor\n # Actual functionality is to set up an array called genes.\n # If boolean flag fillGenes is set to True, genes must be\n # filled with random values between 0 and 1, otherwise\n # it must be filled with 0.\n # Length of array genes must be equal to length \n # constructor parameter.\n # Also initializes local variable mutationRate\n # with corresponding parameter.\n def __init__(self, length, mutationRate, fillGenes=False):\n self.length = length\n self.mutationRate = mutationRate\n self.genes = []\n\n for _ in range(length):\n if fillGenes:\n self.genes.append(random.random())\n else:\n self.genes.append(0)\n \n # Creates two offspring children using a single crossover point.\n # The basic idea is to first pick a random position, create two \n # children and then swap their genes starting from the randomly \n # picked position point.\n # Children genes must be different from both of parents.\n # \n # Returning type: (Chromosome, Chromosome)\n def Crossover(self, another):\n pos = random.randint(1, self.length - 1)\n\n first = Chromosome(self.length, self.mutationRate, False)\n second = Chromosome(self.length, self.mutationRate, False)\n first.genes = self.genes[pos:] + another.genes[:pos]\n second.gens = another.genes[pos:] + self.genes[:pos]\n\n return (first, second)\n\n # Mutates the chromosome genes in place by randomly switching them\n # depending on mutationRate. More precisely, mutation\n # of i-th gene happens with probability of mutationRate.\n def Mutate(self):\n for index in range(len(self.genes)):\n if random.random() < self.mutationRate:\n self.genes[index] = random.random()" }, { "alpha_fraction": 0.5736842155456543, "alphanum_fraction": 0.5736842155456543, "avg_line_length": 18.100000381469727, "blob_id": "8f1b31407f7278e542dfbe688f146bed9d5760a5", "content_id": "ae3f8e7664c641583342145f55fb7823c4ec4037", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/5/С.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "import numpy as np\n\np = int(input())\na = list(map(int, input().split()))\nn, m = list(map(int, input().split()))\n\na = np.reshape(a, (n, m))\n\nfor line in a:\n print(' '.join(map(str, line)))" }, { "alpha_fraction": 0.530434787273407, "alphanum_fraction": 0.530434787273407, "avg_line_length": 28, "blob_id": "43f0355b4306a3e5858cad76ff46a9f099d2096e", "content_id": "b40c281d119a452b9f7d2b05b171a1695b9f90eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 64, "num_lines": 4, "path": "/2/K.py", "repo_name": "bezgodov/python-basics", "src_encoding": "UTF-8", "text": "s = input()\n\ndisabled_chars = ['.', ',', '!', '?']\nprint(len([w for w in s.split(' ') if w not in disabled_chars]))" } ]
61
DJAHIDDJ13/S4
https://github.com/DJAHIDDJ13/S4
df631d57b7b8f0d1988e30f8aef13e214023b298
32a2eb598dda59b5062c610d43d0302dae4e5bc8
99a1d7952b83d42f50f054d936ac9e36665a6be6
refs/heads/master
2021-05-11T07:19:15.650695
2020-04-03T12:40:52
2020-04-03T12:40:52
118,014,243
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6756208539009094, "alphanum_fraction": 0.6791687607765198, "avg_line_length": 31.344263076782227, "blob_id": "b9c764d24cc9482d6e857f6d98e2bbd906953344", "content_id": "3d2e40832ed1797e6ff70575e95442abf1b9fcb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1974, "license_type": "no_license", "max_line_length": 92, "num_lines": 61, "path": "/BDAplus/BDAplus/insert_r_db.py~", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\nimport psycopg2\nimport sys\nimport pprint\nimport pandas as pd\n\ndef main():\n conn_string = \"host='localhost' dbname='olympics' user='postgres' password='123456'\"\n # print the connection string we will use to connect\n print(f\"Connecting to database\\n\\t-> {conn_string}\")\n \n # get a connection, if a connect cannot be made an exception will be raised here\n conn = psycopg2.connect(conn_string)\n \n # conn.cursor will return a cursor object, you can use this cursor to perform queries\n cursor = conn.cursor()\n print(\"Connected!\\n\")\n\n # Reading the csv file data as pandas dataframes\n cities = pd.read_csv('olympics_data/cities.csv', header=0)\n\n # For some reason the first two lines of the medallist csv are empty,\n # (ignoring the comments)\n medal = pd.read_csv('olympics_data/medallists.csv', header=2, comment='\"')\n \n # Here the na_filter=False is necessary since the country code for namibia\n # is NA which is interpreted as NaN by pandas\n ioc = pd.read_csv('olympics_data/ioccountrycodes.csv', header=0, na_filter=False)\n ioc.rename({'Country': 'Country_name'})\n\n\n sports = pd.read_csv('olympics_data/sportstaxonomy.csv', header=0)\n \n\n print(cities)\n print(medal)\n print(ioc)\n print(sports)\n \n print(medal['Medal'].value_counts())\n\n # Insert the data\n # CountryCodea\n # cursor.execute(\"INSERT INTO CountryCode() Values\")\n ioc.to_sql('CountryCode', con=conn, if_exists='replace') \n\n # execute our Query\n # cursor.execute(\"SELECT * FROM my_table\")\n\n\n # retrieve the records from the database\n #records = cursor.fetchall()\n\n # print out the records using pretty print\n # note that the NAMES of the columns are not shown, instead just indexes.\n # for most people this isn't very useful so we'll show you how to return\n # columns as a dictionary (hash) in the next example.\n #pprint.pprint(records)\n \nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.4540051817893982, "alphanum_fraction": 0.5051679611206055, "avg_line_length": 20.85875701904297, "blob_id": "29cb76fcf95bb3eaf35f18a49c318a44906256c6", "content_id": "2d2e951dc6824bb8ff1fab609d7c61d0e0e99337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3870, "license_type": "no_license", "max_line_length": 104, "num_lines": 177, "path": "/IE/TP/TP4/hough.cpp", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <cmath>\nusing namespace cv;\nint p = 0;\nvoid hough(Mat image, float angle_step, int* res_rho, float* res_theta) {\n\tint size = 180.0 / angle_step;\n\tint max_len = max(image.rows, image.cols);\n\tMat *acc = new Mat(max_len, size, sizeof(int));\n\n\tfor(int y = 0; y < image.rows; y++) {\n\t\tfor(int i = 0; i < size; i++) {\n\t\t\tacc->at<int>(Point(y, i)) = 0;\n\t\t}\n\t}\n\n\n\tfor(int y = 0; y < image.rows; y++) {\n\t\tfor(int x = 0; x < image.cols; x++) {\n\t\t\tfor(int i = 0; i < size; i++) {\n\t\t\t\tif(image.at<int>(Point(x, y)) > 0) 
{\n\t\t\t\t\tint theta = i * angle_step * (M_PI / 180);\n\t\t\t\t\tfloat rho = x * sin(theta) + y * cos(theta);\n\t\t\t\t\tacc->at<int>(Point(floor(rho), i))++;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\t/*\n\tif(p == 0) {\n\t\tfor(int y = 0; y < max_len; y++) {\n\t\t\tfor(int i = 0; i < size; i++) {\n\t\t\t\tprintf(\"%d \", acc->at<int>(Point(y, i)));\n\t\t\t}\n\t\t\tprintf(\"\\n\");\n\t\t}sin\n\t\tp = 1;\n\t}\n\t*/\n\tint cur_max = 0;\n\tfor(int y = 0; y < max_len; y++) {\n\t\tfor(int i = 0; i < size; i++) {\n\t\t\tint cur_val = acc->at<int>(Point(y, i));\n\t\t\tif(cur_val > cur_max) {\n\t\t\t\tcur_max = cur_val;\n\n\t\t\t\t*res_rho = y;\n\t\t\t\t*res_theta = i * angle_step;\n\t\t\t}\n\t\t}\n\t}\n}\n\nvoid canny(Mat image, Mat* image2) {\n\tfloat cannyX[1][3] = {{-1, 0, 1}};\n\tfloat cannyY[3][1] = {{-1},\n\t\t\t {0},\n\t\t\t {1}};\n\n\tVec3b color;\n\t\n\t// X convolution\n\tfor(int y = 0; y < image.rows; y++) {\n\t\tfor(int x = 0; x < image.cols; x++) {\n\t\t\tdouble accX = 0.0;\n\t\t\tint n = 0;\n\t\t\tfor(int my = 0; my < 3; my++) {\n\t\t\t\tint nx = x, ny = y + my - 1;\n\t\t\t\tif(ny >= 0 && ny < image.rows) {\n\t\t\t\t\t// find grayscale value\t\n\t\t\t\t\tcolor = image.at<Vec3b>(Point(nx, ny));\n\t\t\t\t\taccX += cannyX[0][my] * (color.val[0] + color.val[1] + color.val[3]) / 3;\n\t\t\t\t\tn++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\taccX /= n;\n\n\t\t\tdouble accY = 0.0;\n\t\t\tn = 0;\n\t\t\tfor(int mx = 0; mx < 3; mx++) {\n\t\t\t\tint nx = x + mx - 1, ny = y;\n\t\t\t\tif(nx >= 0 && nx < image.cols) {\n\t\t\t\t\t// find grayscale value\t\n\t\t\t\t\tcolor = image.at<Vec3b>(Point(nx, ny));\n\t\t\t\t\taccY += cannyY[mx][0] * (color.val[0] + color.val[1] + color.val[3]) / 3;\n\t\t\t\t\tn++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\taccY /= n;\n\t\t\t\n\t\t\tVec3b curColor = image.at<Vec3b>(Point(x, y));\n\t\t\tcurColor.val[0] = curColor.val[1] = curColor.val[2] = (sqrt(accX * accX + accY * accY) > 8)? 
255: 0 ;\n\t\t\timage2->at<Vec3b>(Point(x, y)) = curColor;\n\t\t}\n\t}\n}\n\n/*\nint main() {\n\tVideoCapture cap(0);\n\tif(!cap.isOpened()) {\n\t\tprintf(\"Error !!\");\n\t\treturn -1;\n\t}\n\t2602\n\n\tMat image;\n\tMat image2;\n\tint i = 0;\n\tnamedWindow(\"Edge detection\", 0);\n\tfor(;;) {\n\t\tcap >> image;\n\t\tif(i == 0) {\n\t\t\timage2 = image.clone();\n\t\t}\n\t\tcanny(image, &image2);\n\t\tfloat rho = 50.0;\n\t\tfloat theta = M_PI / 2;2602\n\t\tPoint p1, p2;72\n\t\t//p1.x = 0; p1.y = rho / sin(theta);\n\t\t//p2.x = rho / cos(theta); p2.y = 0;\n\t\t//p1.x = rho*cos(theta); p1.y = rho*sin(theta);\n\t\t//p2.x = rho*cos(theta+M_PI); p2.y = -rho*sin(theta+M_PI);\n\t\tp1.x = 300; p1.y = 300;\n\t\tp2.x = 900; p2.y = 900;\n\t\tprintf(\"%hd, %hd | %hd, %hd\\n\", p1.x, p1.y, p2.x, p2.y);(M_PI/180)\n\t\tline(image2, p1, p2, Scalar(0, 0, 255), 3, CV_AA);\n\t\timshow(\"Edge detection\", image2);\n\n\t\tif(waitKey(33) == 27) break;x * cos_t[t_idx] + y * sin_t[t_idx]\n\t\ti++;\n\t}\n\n\treturn 0;\n}\n*/\n\nint main(int, char**) {\n\tVideoCapture cap(0);\n\tif(!cap.isOpened()) {\n\t\tprintf(\"Error !!\");\n\n\n\t\treturn -1;\n\t}\n\t\n\tMat edges;\n\tnamedWindow(\"Edge detection\", 1);\n\tfor(;;) {\n\t\tMat frame;\n\t\tcap >> frame;\n\t\t\n\t\tcvtColor(frame, edges, CV_RGB2GRAY);\n\t\tGaussianBlur(edges, edges, Size(7, 7), 1.5, 1.5);\n\t\tCanny(edges, edges, 0, 30, 3);\n\t\tint rho;\n\t\tfloat theta;\n\t\t\n\t\though(edges, 10, &rho, &theta);\n\t\t\n\t\tPoint p1, p2;\n\t\t\n\t\tp1.x = rho / sin(theta * (M_PI / 180)); p1.y = 0;\n\t\tp2.x = 0; p2.y = -rho / cos(theta * (M_PI / 180));\n\t\n\t\tprintf(\"%d %g\\n\", rho, theta);\n\t\tprintf(\"%d %d %d %d\\n\", p1.x, p1.y, p2.x, p2.y);\n\n\t\tline(edges, p1, p2, Scalar(255, 255, 255), 3, CV_AA);\n\t\timshow(\"Edge detection\", edges);\n\n\t\tif(waitKey(33) == 27) break;\n\t\t\n\t}\n\treturn 0;\n}\n\n" }, { "alpha_fraction": 0.576440155506134, "alphanum_fraction": 0.583087146282196, "avg_line_length": 23.178571701049805, "blob_id": "c741b6db6d2a754f10c81ecbbe5a81069d6b29b7", "content_id": "37ceba1fe8a79141bb499e5bfce79d66702f4d9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5426, "license_type": "no_license", "max_line_length": 121, "num_lines": 224, "path": "/IA/4TP/TP4_IA_Kohonen_2018_2019_JA_NB/tp4_base_code/kohonen.c", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <math.h>\n\n#include \"kohonen.h\"\n\n/**\n * Initialisation de réseau\n */\nKOHONEN *initKohonen(int sizeX, int sizeY, int sizeInput, float (*distfunc)(float), float (*topDist)(int, int, int, int))\n{\n KOHONEN *map = malloc(sizeof(KOHONEN));\n\n map->sizeX = sizeX;\n map->sizeY = sizeY;\n map->sizeInput = sizeInput;\n map->phi = distfunc;\n map->topDist = topDist;\n\n\n map->weight = malloc(sizeof(float*) * sizeX * sizeY);\n\n for (int i = 0; i < sizeX * sizeY; i++) {\n map->weight[i] = calloc(sizeInput, sizeof(float));\n }\n\n map->input = calloc(sizeof(float), sizeInput);\n\n return map;\n}\n\n/**\n * Libération de mémoire de réseau\n */\nvoid freeKohonen(KOHONEN **map)\n{\n int total_size = ((*map)->sizeX) * ((*map)->sizeY);\n\n for (int i = 0; i < total_size; i++) {\n free((*map)->weight[i]);\n (*map)->weight[i] = NULL;\n }\n\n free((*map)->weight);\n (*map)->weight = NULL;\n\n free((*map)->input);\n (*map)->input = NULL;\n\n free(*map);\n *map = NULL;\n}\n\n/** Pour un carte torique\n * A ring topology for the neurones ie the 0th and 
sizeX-1th neurones are neighbors\n * to find a loop (end of the path is at the start) for the traveling salesman problem\n */\nfloat loopTopologicalDistance(int row_size, int column_size, int a, int b)\n{\n int aX = a % row_size;\n int aY = ((int) a / row_size);\n\n int bX = b % row_size;\n int bY = ((int) b / row_size);\n\n // --------o--------------o----------- // the two o's are the indices of the two we want to compare\n // ________o--------------o___________ // the reverse distance\n // --------o______________o----------- // the normal distance\n // we take the minimum between these two distances to get the \n return fmin(\n fabs(aX - bX), // normal distance between the two \n row_size - fmax(aX, bX) + fmin(aX,bX) // distance if we go the other direction ie through the 0\n ) +\n\n fmin(\n fabs(aY - bY), \n column_size - fmax(aY, bY) + fmin(aY,bY)\n );\n}\n\n/**\n * Distance topologique entre deux neurones\n */\nfloat topologicalDistance(int row_size, int column_size, int a, int b)\n{\n int aX = a % row_size;\n int aY = ((int) a / row_size);\n\n int bX = b % row_size;\n int bY = ((int) b / row_size);\n (void) column_size;\n\n return fabs(aX - bX) + fabs(aY - bY);\n}\n\n/**\n * Fonction d'activation de potentielle\n */\nfloat activation(float x)\n{\n return 1 / (1 + x);\n}\n\n/**\n * Distance manhattan (norme ordre 0)\n */\nfloat manhattanDistance(float *v1, float *v2, int size)\n{\n float total_dist = 0.0;\n for (int i = 0; i < size; i++) {\n total_dist += fabs(v1[i] - v2[i]);\n }\n return total_dist;\n}\n\n/**\n * Distance euclidienne (norme ordre 1)\n */\nfloat euclidianDistance(float *v1, float *v2, int size)\n{\n float total_dist = 0.0;\n for (int i = 0; i < size; i++) {\n total_dist += (v1[i] - v2[i]) * (v1[i] - v2[i]);\n }\n return sqrt(total_dist);\n}\n\n/**\n * Fonction de potentielle\n */\nfloat potential(float *v1, float *v2, int size)\n{\n return activation(euclidianDistance(v1, v2, size)); // You can change the type of distance here\n // either euclidianDistance or manhattanDistance\n}\n\n/**\n * Chercher le neurones gagnant\n */\nvoid findWinner(KOHONEN* map, float* input)\n{\n int total_size = map->sizeX * map->sizeY;\n\n memcpy(map->input, input, sizeof(float) * map->sizeInput);\n\n float cur_max = -1;\n for (int i = 0; i < total_size; i++) {\n float pot = potential(map->weight[i], input, map->sizeInput);\n if(pot > cur_max) {\n cur_max = pot;\n map->winner = i;\n }\n }\n}\n\n/**\n * Mise a jour de carte (le gagnant est stocké dans le structure)\n */\nvoid updateKohonen(KOHONEN* map, float* input, float EPSILON)\n{\n findWinner(map, input);\n\n int total_size = map->sizeX * map->sizeY;\n\n for (int i = 0; i < total_size; i++) {\n //float dist = loopTopologicalDistance(map->sizeX, map->sizeY, i, map->winner); \n // uncomment this for a loop neurone topology (view the loopTopologicalDistance function for more) <<< \n float dist = map->topDist(map->sizeX, map->sizeY, i, map->winner);\n\n for (int j = 0; j < map->sizeInput; j++) {\n map->weight[i][j] += EPSILON * (map->input[j] - map->weight[i][j]) * map->phi(dist);\n }\n }\n}\n\n/**\n * Apprentissage de réseau\n */\nvoid trainKohonen(KOHONEN* map, TRAINING_DATA* data, int num_iter, float EPSILON)\n{\n for (int i = 0; i < num_iter; i++) {\n // choose an input randomly\n int choice = rand() % data->numInput;\n\n // find the winner then update the neurones\n updateKohonen(map, data->input[choice], EPSILON);\n } \n}\n\n/**\n * Initialisation de données d'entrainage\n */\nTRAINING_DATA* initTrainingData(int numInput, int 
sizeInput)\n{\n TRAINING_DATA* data = malloc(sizeof(TRAINING_DATA));\n\n data->sizeInput = sizeInput;\n data->numInput = numInput;\n \n data->input = malloc(sizeof(float*) * numInput);\n for (int i = 0; i < numInput; i++) {\n data->input[i] = calloc(sizeof(float), sizeInput);\n }\n\n return data;\n}\n\n/**\n * Libération mémoire de données d'entrainage\n */\nvoid freeTrainingData(TRAINING_DATA** data)\n{\n for (int i = 0; i < (*data)->numInput; i++) {\n free((*data)->input[i]);\n (*data)->input[i] = NULL;\n }\n\n free((*data)->input);\n (*data)->input = NULL;\n\n free(*data);\n *data = NULL;\n}\n" }, { "alpha_fraction": 0.6981292366981506, "alphanum_fraction": 0.7117347121238708, "avg_line_length": 27, "blob_id": "fe06c8873cd01191c57e0c30f8bf0449a30b296c", "content_id": "339631fc59c7308e21d182345105f96e89281afc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 65, "num_lines": 42, "path": "/BDAplus/create_r_db2.sql", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "CREATE TABLE City (\n city_id INT NOT NULL UNIQUE PRIMARY KEY,\n city_name VARCHAR(64) NOT NULL,\n NOC VARCHAR(3) NOT NULL\n);\n\nCREATE TABLE CountryCode (\n NOC VARCHAR(3) NOT NULL PRIMARY KEY,\n country_name VARCHAR(64) NOT NULL ,\n ISOCode VARCHAR(2) NOT NULL\n);\n\nCREATE TABLE Discipline (\n\tdiscipline_id INT NOT NULL UNIQUE PRIMARY KEY,\n sport_name VARCHAR(64),\n discipline_name VARCHAR(64)\n);\n\nCREATE TYPE Event_Gender_t AS ENUM ('W', 'M', 'X');\nCREATE TABLE Event(\n event_id INT NOT NULL UNIQUE PRIMARY KEY,\n event_name VARCHAR(64),\n event_gender Event_Gender_t,\n\tdiscipline_id INT NOT NULL REFERENCES Discipline(discipline_id),\n edition INT,\n city_id INT NOT NULL REFERENCES City(city_id)\n);\n\nCREATE TYPE Gender_t AS ENUM ('Women', 'Men');\nCREATE TABLE Athlete(\n athlete_id INT NOT NULL UNIQUE PRIMARY KEY,\n athlete_name VARCHAR(64) NOT NULL,\n athlete_gender Gender_t\n);\n\nCREATE TYPE Medal_t AS ENUM ('Gold', 'Silver', 'Bronze');\nCREATE TABLE Medal (\n athlete_id INT NOT NULL REFERENCES Athlete(athlete_id),\n event_id INT NOT NULL REFERENCES Event(event_id),\n represent_NOC VARCHAR(3) REFERENCES CountryCode(NOC),\n medal_type Medal_t\n);\n" }, { "alpha_fraction": 0.5428643226623535, "alphanum_fraction": 0.5535070896148682, "avg_line_length": 25.22637367248535, "blob_id": "e04080700e0773c67611b0d9b90033c58911bc34", "content_id": "4ebb0366afa6b1953506521ef2fd0447aef08dc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11936, "license_type": "no_license", "max_line_length": 131, "num_lines": 455, "path": "/IA/3TP/perceptron.c", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n#include <string.h>\n\n#define EPSILON 0.01\n#define BIAS_EPSILON 0.1\n\n// structure pour chaque neurone\ntypedef struct neurone {\n float *weight; // tableau de poids\n float (*activation)(float); // la fonction d'activation\n float out;\n} NEURONE;\n\ntypedef struct network {\n int num_layers;\n NEURONE **layers; \n float *biases;\n int *sizes; // les tailles de chaque couche dans layers\n} NETWORK;\n\n// pour stocker un entrée de données\ntypedef struct training_data_entry {\n float *input;\n float *output;\n} DATA_ENTRY;\n\n// données d'entrainement\ntypedef struct training_data {\n int size;\n int num_in, num_out;\n DATA_ENTRY *entries;\n} TRAINING_DATA;\n\nfloat heaviside(float 
x)\n{\n return (x > 0) ? 1 : 0;\n}\n\nfloat identity(float x)\n{\n return x;\n}\n\nfloat sigmoid(float x)\n{\n return 1 / (1 + exp(-x));\n}\n\n\n/**\n * To display the 'actual' i'th neurone when showing the neural network\n */\nvoid show_letter(int i) {\n printf(\"%c\", (char) i + 'A');\n}\n\nvoid AC_letter(int i) {\n printf(\"%c\", (i == 1)?'A':'C');\n}\n\n/*\n * repr is a function that displays the value the neurone represents\n * ex: for recognizing the alphabet it should print the letter from its index\n */\nvoid dumpNetwork(NETWORK *network, void (*repr)(int))\n{\n printf(\"Network dump\\n\");\n int num_layers = network->num_layers;\n\n int max_size = 0;\n\n for(int i = 0; i < network->num_layers; i++) {\n int new_size = network->sizes[i];\n max_size = (max_size < new_size) ? new_size : max_size;\n }\n\n // finding the top 3 winners ie the 3 output neurones with the biggest .out value\n int win = -1;\n int win2 = -1;\n int win3 = -1;\n\n int out_layer_size = network->sizes[network->num_layers-1];\n NEURONE* out_layer = network->layers[network->num_layers-1];\n for (int i = 0; i < out_layer_size; i++) {\n if(win == -1 || out_layer[i].out >= out_layer[win].out) {\n win3 = win2;\n win2 = win;\n win = i;\n } else if(win2 == -1 || out_layer[i].out >= out_layer[win2].out) {\n win3 = win2;\n win2 = i;\n } else if(win3 == -1 || out_layer[i].out >= out_layer[win3].out) {\n win3 = i;\n }\n\n }\n\n // print the output of all the neurones of the network\n for(int i = 0; i < max_size; i++) {\n for(int layer = 0; layer < num_layers; layer++) {\n int translate = (max_size - network->sizes[layer]) / 2; // translate the display to the center\n \n if(i >= translate && i < max_size-translate) {\n printf(\"%.3f \", network->layers[layer][i - translate].out);\n if(layer == num_layers-1) {\n repr(i - translate);\n \n // show the top 3\n if(i - translate == win) \n printf(\" <-- first\"); \n else if(i - translate == win2)\n printf(\" <-- second\");\n else if(i - translate == win3)\n printf(\" <-- third\");\n }\n }\n printf(\"\\t\");\n }\n\n printf(\"\\n\");\n }\n printf(\"\\n\");\n}\n\n\n/**\n * Initialize the network with random values [0, 1] for the weights and 0 for\n * the biases\n */\nNETWORK* initNetwork(int num_layers, const int layer_sizes[])\n{\n NETWORK *network = malloc(sizeof(NETWORK));\n\n network->num_layers = num_layers;\n\n network->layers = malloc(sizeof(NEURONE*) * num_layers);\n network->biases = calloc(sizeof(float), num_layers);\n\n network->sizes = malloc(sizeof(int) * num_layers);\n\n for(int layer = 0; layer < num_layers; layer++) {\n network->sizes[layer] = layer_sizes[layer];\n\n network->layers[layer] = malloc(sizeof(NEURONE) * layer_sizes[layer]);\n\n float (*func)(float);\n int size;\n\n if(layer == 0) {\n func = identity;\n size = 1;\n } else {\n func = heaviside; // CHANGE THE ACTIVATION FUNCTION HERE sigmoid or heaviside\n size = layer_sizes[layer - 1];\n }\n\n for(int i = 0; i < layer_sizes[layer]; i++) {\n network->layers[layer][i].weight = malloc(sizeof(float) * size);\n for (int j = 0; j < size; j++) {\n network->layers[layer][i].weight[j] = rand() / RAND_MAX; \n }\n\n network->layers[layer][i].activation = func;\n }\n }\n\n return network;\n}\n\n/**\n * Liberate the memory fo the network\n */\nvoid freeNetwork(NETWORK** network_ptr)\n{\n NETWORK* network = *network_ptr;\n\n for(int layer = 0; layer < network->num_layers; layer++) {\n int cur_size = network->sizes[layer];\n\n for(int i = 0; i < cur_size; i++) {\n if(network->layers[layer][i].weight) {\n 
free(network->layers[layer][i].weight);\n }\n\n network->layers[layer][i].weight = NULL;\n }\n\n if(network->layers[layer]) {\n free(network->layers[layer]);\n }\n\n network->layers[layer] = NULL;\n }\n\n if(network->layers) {\n free(network->layers);\n }\n\n network->layers = NULL;\n\n if(network->sizes) {\n free(network->sizes);\n }\n\n network->sizes = NULL;\n\n if(*network_ptr) {\n free(*network_ptr);\n }\n\n *network_ptr = NULL;\n}\n\n/**\n * Forward propagation of the network of all the neurones\n */\nvoid evaluateNetwork(NETWORK* network, float* data)\n{\n // input layer\n for(int j = 0; j < network->sizes[0]; j++) {\n network->layers[0][j].out = data[j];\n }\n\n int num_layers = network->num_layers;\n\n for(int layer = 1; layer < num_layers; layer++) {\n\n int cur_layer_size = network->sizes[layer];\n int prev_layer_size = network->sizes[layer - 1];\n\n for(int i = 0; i < cur_layer_size; i++) {\n NEURONE *cur_neurone = &network->layers[layer][i];\n cur_neurone->out = 0.0;\n\n for(int j = 0; j < prev_layer_size; j++) {\n NEURONE *prev_neurone = &network->layers[layer - 1][j];\n cur_neurone->out += prev_neurone->out * cur_neurone->weight[j];\n }\n\n cur_neurone->out = cur_neurone->activation(cur_neurone->out - network->biases[layer]);\n }\n }\n}\n\n/**\n * Train the network using the data\n * Only works for two layers\n * */\nvoid trainNetwork(NETWORK *network, TRAINING_DATA *data)\n{\n int num_iter_min = 1000; // Minimum number of training steps\n int num_iter_max = 100000; // Maximum number of training steps\n float target_err = 0.01; // target error \n float err = 1.0; // error init value\n \n for(int i = 0; (i < num_iter_min || err > target_err) && i < num_iter_max; i++) {\n int choice = i % data->size; // select training examples consecutively\n // int choice = rand() % data->size; // select a random training example\n evaluateNetwork(network, data->entries[choice].input);\n\n err = 0.0;\n for(int layer = network->num_layers - 1; layer >= 1; layer--) {\n\n // for each neurone in the layer\n for(int k = 0; k < network->sizes[layer]; k++) {\n float sol = data->entries[choice].output[k];\n err += fabs(sol - network->layers[layer][k].out);\n network->biases[layer] -= BIAS_EPSILON * (sol - network->layers[layer][k].out);\n\n // for each weight of the k th neurone\n for(int j = 0; j < network->sizes[layer - 1]; j++) {\n network->layers[layer][k].weight[j] += EPSILON *\n (sol - network->layers[layer][k].out) *\n network->layers[layer - 1][j].out;\n }\n }\n }\n\n }\n\n}\n\n/**\n * Read the training data from a file\n */\nTRAINING_DATA *readTrainingData(const char *filename)\n{\n FILE* f = fopen(filename, \"r\");\n\n if(!f) {\n fprintf(stderr, \"Cannot open %s\\n\", filename);\n exit(1);\n }\n\n TRAINING_DATA *data = malloc(sizeof(TRAINING_DATA));\n\n if(fscanf(f, \"%d\\n\", &data->size) < 1) {\n fprintf(stderr, \"Cannot read number of data entries\\n\");\n exit(1);\n }\n\n data->entries = malloc(sizeof(DATA_ENTRY) * data->size);\n\n if(data->entries == NULL) {\n fprintf(stderr, \"Cannot allocate memory\\n\");\n exit(1);\n }\n\n if(fscanf(f, \"%d %d\\n\", &data->num_in, &data->num_out) < 2) {\n fprintf(stderr, \"Cannot read input or output sizes\\n\");\n exit(1);\n }\n\n for(int i = 0; i < data->size; i++) {\n data->entries[i].input = malloc(sizeof(float) * data->num_in);\n data->entries[i].output = malloc(sizeof(float) * data->num_out);\n\n if(data->entries[i].input == NULL || data->entries[i].output == NULL) {\n fprintf(stderr, \"Cannot allocate memory for input or output 
entry=%d\\n\", i);\n exit(1);\n }\n\n for(int j = 0; j < data->num_in; j++) {\n if(fscanf(f, \"%f\", &data->entries[i].input[j]) < 1) {\n fprintf(stderr, \"Cannot read input value entry=%d, value=%d\\n\", i, j);\n exit(1);\n }\n }\n\n for(int j = 0; j < data->num_out; j++) {\n if(fscanf(f, \"%f\", &data->entries[i].output[j]) < 1) {\n fprintf(stderr, \"Cannot read input value entry=%d, value=%d\\n\", i, j);\n exit(1);\n }\n }\n\n }\n\n fclose(f);\n\n return data;\n}\n\n/**\n * Frees up the training data\n */\nvoid freeTrainingData(TRAINING_DATA **data)\n{\n for(int i = 0; i < (*data)->size; i++) {\n if((*data)->entries[i].input) {\n free((*data)->entries[i].input);\n }\n\n (*data)->entries[i].input = NULL;\n\n if((*data)->entries[i].output) {\n free((*data)->entries[i].output);\n }\n\n (*data)->entries[i].output = NULL;\n }\n\n if((*data)->entries) {\n free((*data)->entries);\n }\n\n (*data)->entries = NULL;\n\n if(*data) {\n free(*data);\n }\n\n *data = NULL;\n}\n\n/**\n * Test and display network results on data\n */\nvoid testNetwork(NETWORK* network, TRAINING_DATA* data)\n{\n float err = 0.0;\n int num_correct = 0;\n\n for (int i = 0; i < data->size; i++) {\n evaluateNetwork(network, data->entries[i].input);\n dumpNetwork(network, show_letter);\n\n float loc_err = 0.0; \n int arg_max = -1;\n int base = -1;\n\n for (int j = 0; j < network->sizes[network->num_layers-1]; j++) {\n // update the local error\n loc_err += fabs(network->layers[network->num_layers-1][j].out - data->entries[i].output[j]);\n \n // find the network's guess ie the output neurone with the maximum value\n if(arg_max == -1 || network->layers[network->num_layers-1][j].out > network->layers[network->num_layers-1][arg_max].out) {\n arg_max = j;\n }\n\n // extract the training data's base truth ie find out which output neurone\n // has the biggest value in the training data\n if(base == -1 || data->entries[i].output[j] > data->entries[i].output[base]) {\n base = j;\n }\n }\n\n if(arg_max == base) {\n num_correct ++;\n printf(\"CORRECT\\n\");\n } else {\n printf(\"WRONG\\n\");\n }\n\n err += loc_err;\n }\n\n printf(\"Statistics of the NN on the testing data:\\n\"\n \"Total global error: %g, average = %g\\n\"\n \"Total accuracy: %d/%d = %g\\n\", \n err, err / data->size, \n num_correct, data->size,\n ((float)num_correct) / data->size);\n\n}\n\nint main(int argc, char *argv[])\n{\n if(argc != 3){\n fprintf(stderr, \"Usage: ./percepton [training_data] [testing_data]\\n\");\n exit(1);\n }\n TRAINING_DATA *train_data = readTrainingData(argv[1]);\n TRAINING_DATA *test_data = readTrainingData(argv[2]);\n\n if(train_data->num_in != test_data->num_in || train_data->num_out != test_data->num_out) {\n fprintf(stderr, \"Testing data size doesn't match the training data\\n\");\n exit(1);\n }\n\n const int layer_sizes[] = {train_data->num_in, train_data->num_out};\n NETWORK *network = initNetwork(2, layer_sizes);\n\n trainNetwork(network, train_data);\n\n testNetwork(network, test_data);\n\n freeNetwork(&network);\n freeTrainingData(&train_data);\n freeTrainingData(&test_data);\n\n return 0;\n}\n" }, { "alpha_fraction": 0.7041109204292297, "alphanum_fraction": 0.7041109204292297, "avg_line_length": 31.6875, "blob_id": "803aff49d5735854c5fc2b36c2d3a19bb1ea43ce", "content_id": "4ebacf91db1f871b4df57f0bc75e49b2b1a0854d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2092, "license_type": "no_license", "max_line_length": 137, "num_lines": 64, "path": "/BDAplus/build_graph_db.cql", 
"repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "create index on :Country(name);\ncreate index on :City(name);\ncreate index on :Sport(name);\ncreate index on :Discipline(name);\ncreate index on :Athlete(name);\ncreate index on :Event(name);\ncreate index on :Year(name);\ncreate index on :Gender(name);\ncreate constraint on (c:Country) assert c.noc is unique;\n\n\n//add the genders\ncreate (g:Gender {name:\"Men\"});\ncreate (g:Gender {name:\"Women\"});\n\n//load countries\nload csv with headers from\n\"file:///olympics_data/ioccountrycodes.csv\" as csv\ncreate (c:Country {name: csv.Country, noc: csv.NOC, iso: csv.ISOcode});\n\n//load cities\nload csv with headers from\n\"file:///olympics_data/cities.csv\" as csv\nmatch (c:Country {noc: csv.NOC})\nmerge (ci:City {name: csv.City})\nmerge (ci)-[:LOCATED_IN]->(c);\n\n//load sports taxonomy\nload csv with headers from\n\"file:///olympics_data/sportstaxonomy.csv\" as csv\nmerge (s:Sport {name: csv.Sport})\nmerge (s)<-[:PART_OF]-(d:Discipline {name: csv.Discipline});\n\n//load the games\nload csv with headers from\n\"file:///olympics_data/medallists.csv\" as csv\nmatch (c:City {name: csv.City})\nmerge (c)-[:HOSTS_GAMES]->(y:Year {name: csv.Edition});\n\n//load the events at the games\n//cypher planner=rule\nload csv with headers from\n\"file:///olympics_data/medallists.csv\" as csv\nmatch (y:Year {name: csv.Edition}), (d:Discipline {name: csv.Discipline})\nmerge (d)<-[:PART_OF]-(e:Event {name: csv.Event})-[:AT_GAMES]->(y);\n\n//load the medallists\nload csv with headers from\n\"file:///olympics_data/medallists.csv\" as csv\nmatch (g:Gender {name: csv.Gender})\nmerge (a:Athlete {name: csv.Athlete})-[:HAS_GENDER]->(g);\n\n//cypher planner=rule\nload csv with headers from\n\"file:///olympics_data/medallists.csv\" as csv\nmatch (a:Athlete {name: csv.Athlete}), (c:Country {noc: csv.NOC})\nmerge (a)-[:REPRESENTS]->(c);\n\n//cypher planner=rule\n//using periodic commit\nload csv with headers from\n\"file:///olympics_data/medallists.csv\" as csv\nmatch (a:Athlete {name: csv.Athlete}), (d:Discipline {name: csv.Discipline})<--(e:Event {name: csv.Event})-->(y:Year {name: csv.Edition})\ncreate (a)-[:WINS]->(m:Medal {type: csv.Medal})-[:REWARD_FOR]->(e);\n" }, { "alpha_fraction": 0.7149576544761658, "alphanum_fraction": 0.7206020951271057, "avg_line_length": 28.52777862548828, "blob_id": "dbb6f287b45db0b8bcae3a08551ea119653d5afc", "content_id": "bc3421f8254554fe7077299980bff667ab0d89c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1063, "license_type": "no_license", "max_line_length": 96, "num_lines": 36, "path": "/IA/4TP/TP4_IA_Kohonen_2018_2019_JA_NB/tp4_base_code/kohonen.h", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#ifndef KOHONEN_H\n#define KOHONEN_H\n\ntypedef struct kohonen {\n float **weight;\n float *input;\n int sizeX, sizeY;\n int sizeInput;\n int winner;\n float (*phi)(float);\n float (*topDist)(int, int, int, int);\n} KOHONEN;\n\ntypedef struct training_data {\n float** input;\n int sizeInput;\n int numInput;\n} TRAINING_DATA;\n\nKOHONEN *initKohonen(int, int, int, float (*func)(float), float (*topDist)(int, int, int, int));\nvoid freeKohonen(KOHONEN**);\nvoid updateKohonen(KOHONEN* map, float* input, float EPSILON);\nvoid trainKohonen(KOHONEN* map, TRAINING_DATA* data, int num_iter, float EPSILON);\n\nfloat potential(float *v1, float *v2, int size);\n\n/* For topological distance */\nfloat loopTopologicalDistance(int row_size, int column_size, int a, 
int b);\nfloat topologicalDistance(int row_size, int column_size, int a, int b);\n\nTRAINING_DATA* initTrainingData(int sizeInput, int numInput);\nvoid freeTrainingData(TRAINING_DATA** data);\n\nfloat manhattanDistance(float *v1, float *v2, int size);\nfloat euclidianDistance(float *v1, float *v2, int size);\n#endif\n" }, { "alpha_fraction": 0.4670083224773407, "alphanum_fraction": 0.5099295377731323, "avg_line_length": 19.272727966308594, "blob_id": "4b9bac091a4db438568cf94858fef1ff084f77e8", "content_id": "b5a193be685aa9291cab5980d72b9bf018090f67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1561, "license_type": "no_license", "max_line_length": 104, "num_lines": 77, "path": "/IE/TP3/edge_detect.cpp", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\n#include <cmath>\nusing namespace cv;\n\nvoid edge_detect(Mat image, Mat* image2) {\n\tfloat cannyX[1][3] = {{-1, 0, 1}};\n\tfloat cannyY[3][1] = {{-1},\n\t\t\t {0},\n\t\t\t {1}};\n\n\tVec3b color;\n\t\n\t// X convolution\n\tfor(int y = 0; y < image.rows; y++) {\n\t\tfor(int x = 0; x < image.cols; x++) {\n\t\t\tdouble accX = 0.0;\n\t\t\tint n = 0;\n\t\t\tfor(int my = 0; my < 3; my++) {\n\t\t\t\tint nx = x, ny = y + my - 1;\n\t\t\t\tif(ny >= 0 && ny < image.rows) {\n\t\t\t\t\t// find grayscale value\t\n\t\t\t\t\tcolor = image.at<Vec3b>(Point(nx, ny));\n\t\t\t\t\taccX += cannyX[0][my] * (color.val[0] + color.val[1] + color.val[3]) / 3;\n\t\t\t\t\tn++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\taccX /= n;\n\n\t\t\tdouble accY = 0.0;\n\t\t\tn = 0;\n\t\t\tfor(int mx = 0; mx < 3; mx++) {\n\t\t\t\tint nx = x + mx - 1, ny = y;\n\t\t\t\tif(nx >= 0 && nx < image.cols) {\n\t\t\t\t\t// find grayscale value\t\n\t\t\t\t\tcolor = image.at<Vec3b>(Point(nx, ny));\n\t\t\t\t\taccY += cannyY[mx][0] * (color.val[0] + color.val[1] + color.val[3]) / 3;\n\t\t\t\t\tn++;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\taccY /= n;\n\t\t\t\n\t\t\tVec3b curColor = image.at<Vec3b>(Point(x, y));\n\t\t\tcurColor.val[0] = curColor.val[1] = curColor.val[2] = (sqrt(accX * accX + accY * accY) > 8)? 
255: 0 ;\n\t\t\timage2->at<Vec3b>(Point(x, y)) = curColor;\n\t\t}\n\t}\n\n}\n\nint main() {\n\tVideoCapture cap(0);\n\tif(!cap.isOpened()) {\n\t\tprintf(\"Error !!\");\n\t\treturn -1;\n\t}\n\t\n\n\tMat image;\n\tMat image2;\n\tint i = 0;\n\tnamedWindow(\"Edge detection\", 0);\n\tfor(;;) {\n\t\tcap >> image;\n\t\tif(i == 0) {\n\t\t\timage2 = image.clone();\n\t\t}\n\t\tedge_detect(image, &image2);\n\t\timshow(\"Edge detection\", image2);\n\n\t\tif(waitKey(33) == 27) break;\n\t\ti++;\n\t}\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.5179035067558289, "alphanum_fraction": 0.5364660024642944, "avg_line_length": 26.7233829498291, "blob_id": "043ad1194abe32896ed16ec14f196d04092b8a66", "content_id": "d26889e5482322f6c102417780145ce08e50e56d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 26562, "license_type": "no_license", "max_line_length": 184, "num_lines": 958, "path": "/IA/1TP/wargame.c", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <string.h>\n#include <time.h>\n\n#define NB_LIGNES 10\n#define NB_COLONNES 10\n\n// changed this to accomodate largest f_eval\n#define INFINI 1000000\n\n#define PROF_MAX 4\n\n// either 1 for alpha beta algorithm or 0 for without\n#define ALPHA_BETA 1\n/*#define DEBUG*/\n\n\ntypedef struct pion_s {\n int couleur;\n int valeur;\n} Pion;\n\nPion *plateauDeJeu;\n\nvoid f_affiche_plateau(Pion *plateau);\nint f_convert_char2int(char c);\nchar f_convert_int2char(int i);\n\n\n\nint f_convert_char2int(char c)\n{\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n if(c >= 'A' && c <= 'Z') {\n return (int)(c - 'A');\n }\n\n if(c >= 'a' && c <= 'z') {\n return (int)(c - 'a');\n }\n\n return -1;\n}\n\nchar f_convert_int2char(int i)\n{\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n return (char)i + 'A';\n}\n\nPion *f_init_plateau()\n{\n int i, j;\n Pion *plateau = NULL;\n\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n plateau = (Pion *)malloc(NB_LIGNES * NB_COLONNES * sizeof(Pion));\n\n if(plateau == NULL) {\n printf(\"error: unable to allocate memory\\n\");\n exit(EXIT_FAILURE);\n }\n\n for(i = 0; i < NB_LIGNES; i++) {\n for(j = 0; j < NB_COLONNES; j++) {\n plateau[i * NB_COLONNES + j].couleur = 0;\n plateau[i * NB_COLONNES + j].valeur = 0;\n }\n }\n\n plateau[9 * NB_COLONNES + 5].couleur = 1;\n plateau[9 * NB_COLONNES + 5].valeur = 1;\n\n plateau[9 * NB_COLONNES + 6].couleur = 1;\n plateau[9 * NB_COLONNES + 6].valeur = 2;\n\n plateau[9 * NB_COLONNES + 7].couleur = 1;\n plateau[9 * NB_COLONNES + 7].valeur = 3;\n\n plateau[9 * NB_COLONNES + 8].couleur = 1;\n plateau[9 * NB_COLONNES + 8].valeur = 2;\n\n plateau[9 * NB_COLONNES + 9].couleur = 1;\n plateau[9 * NB_COLONNES + 9].valeur = 1;\n\n plateau[8 * NB_COLONNES + 0].couleur = 1;\n plateau[8 * NB_COLONNES + 0].valeur = 1;\n\n plateau[8 * NB_COLONNES + 1].couleur = 1;\n plateau[8 * NB_COLONNES + 1].valeur = 3;\n\n plateau[8 * NB_COLONNES + 2].couleur = 1;\n plateau[8 * NB_COLONNES + 2].valeur = 3;\n\n plateau[8 * NB_COLONNES + 3].couleur = 1;\n plateau[8 * NB_COLONNES + 3].valeur = 1;\n\n plateau[8 * NB_COLONNES + 6].couleur = 1;\n plateau[8 * NB_COLONNES + 6].valeur = 1;\n\n plateau[8 * NB_COLONNES + 7].couleur = 1;\n plateau[8 * NB_COLONNES + 7].valeur = 1;\n\n plateau[8 * NB_COLONNES + 8].couleur = 1;\n plateau[8 * NB_COLONNES + 8].valeur = 1;\n\n plateau[7 * NB_COLONNES + 1].couleur 
= 1;\n plateau[7 * NB_COLONNES + 1].valeur = 1;\n\n plateau[7 * NB_COLONNES + 2].couleur = 1;\n plateau[7 * NB_COLONNES + 2].valeur = 1;\n\n plateau[2 * NB_COLONNES + 7].couleur = -1;\n plateau[2 * NB_COLONNES + 7].valeur = 1;\n\n plateau[2 * NB_COLONNES + 8].couleur = -1;\n plateau[2 * NB_COLONNES + 8].valeur = 1;\n\n plateau[1 * NB_COLONNES + 1].couleur = -1;\n plateau[1 * NB_COLONNES + 1].valeur = 1;\n\n plateau[1 * NB_COLONNES + 2].couleur = -1;\n plateau[1 * NB_COLONNES + 2].valeur = 1;\n\n plateau[1 * NB_COLONNES + 3].couleur = -1;\n plateau[1 * NB_COLONNES + 3].valeur = 1;\n\n plateau[1 * NB_COLONNES + 6].couleur = -1;\n plateau[1 * NB_COLONNES + 6].valeur = 1;\n\n plateau[1 * NB_COLONNES + 7].couleur = -1;\n plateau[1 * NB_COLONNES + 7].valeur = 3;\n\n plateau[1 * NB_COLONNES + 8].couleur = -1;\n plateau[1 * NB_COLONNES + 8].valeur = 3;\n\n plateau[1 * NB_COLONNES + 9].couleur = -1;\n plateau[1 * NB_COLONNES + 9].valeur = 1;\n\n plateau[0 * NB_COLONNES + 0].couleur = -1;\n plateau[0 * NB_COLONNES + 0].valeur = 1;\n\n plateau[0 * NB_COLONNES + 1].couleur = -1;\n plateau[0 * NB_COLONNES + 1].valeur = 2;\n\n plateau[0 * NB_COLONNES + 2].couleur = -1;\n plateau[0 * NB_COLONNES + 2].valeur = 3;\n\n plateau[0 * NB_COLONNES + 3].couleur = -1;\n plateau[0 * NB_COLONNES + 3].valeur = 2;\n\n plateau[0 * NB_COLONNES + 4].couleur = -1;\n plateau[0 * NB_COLONNES + 4].valeur = 1;\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n return plateau;\n}\n\nvoid f_affiche_plateau(Pion *plateau)\n{\n int i, j, k;\n\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n printf(\"\\n \");\n\n for(k = 0; k < NB_COLONNES; k++) {\n printf(\"%2c \", f_convert_int2char(k));\n }\n\n printf(\"\\n \");\n\n for(k = 0; k < NB_COLONNES; k++) {\n printf(\"-- \");\n }\n\n printf(\"\\n\");\n\n for(i = NB_LIGNES - 1; i >= 0; i--) {\n printf(\"%2d \", i);\n\n for(j = 0; j < NB_COLONNES; j++) {\n printf(\"|\");\n\n switch(plateau[i * NB_COLONNES + j].couleur) {\n case -1:\n printf(\"%do\", plateau[i * NB_COLONNES + j].valeur);\n break;\n\n case 1:\n printf(\"%dx\", plateau[i * NB_COLONNES + j].valeur);\n break;\n\n default:\n printf(\" \");\n }\n }\n\n printf(\"|\\n \");\n\n for(k = 0; k < NB_COLONNES; k++) {\n printf(\"-- \");\n }\n\n printf(\"\\n\");\n }\n\n printf(\" \");\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n}\n\nint f_gagnant()\n{\n int i, j, somme1 = 0, somme2 = 0;\n\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n//Quelqu'un est-il arrive sur la ligne de l'autre\n for(i = 0; i < NB_COLONNES; i++) {\n if(plateauDeJeu[i].couleur == 1) {\n return 1;\n }\n\n if(plateauDeJeu[(NB_LIGNES - 1)*NB_COLONNES + i].couleur == -1) {\n return -1;\n }\n }\n\n//taille des armees\n for(i = 0; i < NB_LIGNES; i++) {\n for(j = 0; j < NB_COLONNES; j++) {\n if(plateauDeJeu[i * NB_COLONNES + j].couleur == 1) {\n somme1++;\n }\n\n if(plateauDeJeu[i * NB_COLONNES + j].couleur == -1) {\n somme2++;\n }\n }\n }\n\n if(somme1 == 0) {\n return -1;\n }\n\n if(somme2 == 0) {\n return 1;\n }\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n return 0;\n}\n\n\n/**\n* Prend comme argument la ligne et la colonne de la case\n* \tpour laquelle la bataille a lieu\n* Renvoie le couleur du gagnant\n* */\nint f_bataille(int l, int c)\n{\n int i, j, mini, maxi, minj, maxj;\n int somme = 0;\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, 
__LINE__);\n#endif\n mini = l - 1 < 0 ? 0 : l - 1;\n maxi = l + 1 > NB_LIGNES - 1 ? NB_LIGNES - 1 : l + 1;\n minj = c - 1 < 0 ? 0 : c - 1;\n maxj = c + 1 > NB_COLONNES - 1 ? NB_COLONNES - 1 : c + 1;\n\n for(i = mini; i <= maxi; i++) {\n for(j = minj; j <= maxj; j++) {\n somme += plateauDeJeu[i * NB_COLONNES + j].couleur * plateauDeJeu[i * NB_COLONNES + j].valeur;\n }\n }\n\n somme -= plateauDeJeu[l * NB_COLONNES + c].couleur * plateauDeJeu[l * NB_COLONNES + c].valeur;\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n if(somme < 0) {\n return -1;\n }\n\n if(somme > 0) {\n return 1;\n }\n\n return plateauDeJeu[l * NB_COLONNES + c].couleur;\n}\n\n\n/**\n* Prend la ligne et colonne de la case d'origine\n* \tet la ligne et colonne de la case de destination\n* Renvoie 1 en cas d'erreur\n* Renvoie 0 sinon\n* */\nint f_test_mouvement(Pion *plateau, int l1, int c1, int l2, int c2, int couleur)\n{\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n printf(\"de (%d,%d) vers (%d,%d)\\n\", l1, c1, l2, c2);\n#endif\n\n /* Erreur, hors du plateau */\n if(l1 < 0 || l1 >= NB_LIGNES || l2 < 0 || l2 >= NB_LIGNES ||\n c1 < 0 || c1 >= NB_COLONNES || c2 < 0 || c2 >= NB_COLONNES) {\n return 1;\n }\n\n /* Erreur, il n'y a pas de pion a deplacer ou le pion n'appartient pas au joueur*/\n if(plateau[l1 * NB_COLONNES + c1].valeur == 0 || plateau[l1 * NB_COLONNES + c1].couleur != couleur) {\n return 1;\n }\n\n /* Erreur, tentative de tir fratricide */\n if(plateau[l2 * NB_COLONNES + c2].couleur == plateau[l1 * NB_COLONNES + c1].couleur) {\n return 1;\n }\n\n if(l1 - l2 > 1 || l2 - l1 > 1 || c1 - c2 > 1 || c2 - c1 > 1 || (l1 == l2 && c1 == c2)) {\n return 1;\n }\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n return 0;\n}\n\n\n/**\n* Prend la ligne et colonne de la case d'origine\n* \tet la ligne et colonne de la case de destination\n* et effectue le trantement de l'operation demandée\n* Renvoie 1 en cas d'erreur\n* Renvoie 0 sinon\n* */\nint f_bouge_piece(Pion *plateau, int l1, int c1, int l2, int c2, int couleur)\n{\n int gagnant = 0;\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n if(f_test_mouvement(plateau, l1, c1, l2, c2, couleur) != 0) {\n return 1;\n }\n\n /* Cas ou il n'y a personne a l'arrivee */\n if(plateau[l2 * NB_COLONNES + c2].valeur == 0) {\n plateau[l2 * NB_COLONNES + c2].couleur = plateau[l1 * NB_COLONNES + c1].couleur;\n plateau[l2 * NB_COLONNES + c2].valeur = plateau[l1 * NB_COLONNES + c1].valeur;\n plateau[l1 * NB_COLONNES + c1].couleur = 0;\n plateau[l1 * NB_COLONNES + c1].valeur = 0;\n } else {\n gagnant = f_bataille(l2, c2);\n\n /* victoire */\n if(gagnant == couleur) {\n plateau[l2 * NB_COLONNES + c2].couleur = plateau[l1 * NB_COLONNES + c1].couleur;\n plateau[l2 * NB_COLONNES + c2].valeur = plateau[l1 * NB_COLONNES + c1].valeur;\n plateau[l1 * NB_COLONNES + c1].couleur = 0;\n plateau[l1 * NB_COLONNES + c1].valeur = 0;\n }\n /* defaite */\n else if(gagnant != 0) {\n plateau[l1 * NB_COLONNES + c1].couleur = 0;\n plateau[l1 * NB_COLONNES + c1].valeur = 0;\n }\n }\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n return 0;\n}\n\n//Calcul du nombre de pions sur le plateau du joueur\nint f_nbPions(Pion* jeu, int joueur)\n{\n int nbPion = 0;\n int i, j;\n\n for (i = 0; i < NB_COLONNES; ++i) {\n for (j = 0; j < NB_LIGNES; ++j) {\n if (jeu[i * NB_COLONNES + j].couleur == joueur) {\n ++nbPion;\n }\n }\n }\n\n return 
nbPion;\n}\n\n//Calcul de la valeur de tous les pions du joueur\nint f_valeur(Pion* jeu, int joueur)\n{\n int i, j;\n int valeur = 0;\n\n for (i = 0; i < NB_COLONNES; ++i) {\n for (j = 0; j < NB_LIGNES; ++j) {\n if (jeu[i * NB_COLONNES + j].couleur == joueur) {\n valeur += jeu[i * NB_COLONNES + j].valeur;\n }\n }\n }\n\n return valeur;\n}\n\n//fonction d'évaluation\nint f_eval(Pion* jeu, int joueur)\n{\n int dist_diff = 0; // sum of players distances from the opposite player's goal line\n int val_diff = 0;\n\n for (int i = 0; i < NB_LIGNES; i++) {\n for (int j = 0; j < NB_COLONNES; j++) {\n int col = jeu[i * NB_COLONNES + j].couleur;\n int val = jeu[i * NB_COLONNES + j].valeur;\n\n // ignore empty cells\n if(col != 0) {\n int goal = (col == -1) ? 10 : -1; // goal line that the pawns must seek\n\n // distance is reversed because it is inversly correlated with evaluation\n // ie: if the distance is big the evaluation should be small and if\n // the distance is small the evaluation should be big\n int inv_dist = (10 - abs(goal - i));\n int is_player = (col == joueur) ? 1 : -1;\n\n dist_diff += is_player * inv_dist;\n\n // difference of the sum of the players pawn values and the sum of the\n // opponents pawn values\n val_diff += is_player * val;\n }\n }\n }\n\n int mult_factor = 75; // this value should represent how much more important the value\n // difference is from the distance difference\n /*\n // to randomize played games\n // expected to either add or substract 1 (uniformly) from the evaluation every (rand_step) calls\n int rand_step = 10;\n int rand_add = ((rand() % 2) ? 1 : -1) * ((rand() % rand_step == 0) ? 1 : 0);\n */\n return val_diff * mult_factor + dist_diff;// + rand_add;\n}\n\n//copie du plateau\nvoid f_copie_plateau(Pion* source, Pion* destination)\n{\n int i, j;\n\n for (i = 0; i < NB_LIGNES; i++) {\n for (j = 0; j < NB_COLONNES; j++) {\n destination[i * NB_COLONNES + j].couleur = source[i * NB_COLONNES + j].couleur;\n destination[i * NB_COLONNES + j].valeur = source[i * NB_COLONNES + j].valeur;\n }\n }\n}\n\n//mise a zero du plateau\nPion* f_raz_plateau()\n{\n Pion* jeu = NULL;\n int i, j;\n jeu = (Pion *) malloc(NB_LIGNES * NB_COLONNES * sizeof (Pion));\n\n for (i = 0; i < NB_LIGNES; i++) {\n for (j = 0; j < NB_COLONNES; j++) {\n jeu[i * NB_COLONNES + j].couleur = 0;\n jeu[i * NB_COLONNES + j].valeur = 0;\n }\n }\n\n return jeu;\n}\n\n// global variable to store the ai move\nint fromX, fromY;\nint toX, toY;\n\n// to store the collected stats\nstruct {\n long num_tested_moves; // number of all node visits from all calls\n int num_AI_calls; // number of all f_IA calls\n double total_elapsed_time; // in seconds (only execution of f_negamax or f_negamax_ab is counted)\n int num_searched_nodes;\n} stats;\n\nvoid f_affiche_stats()\n{\n FILE* output_file = fopen(\"output.csv\", \"a\");\n double average_elapsed_time = 1000.0 * stats.total_elapsed_time / stats.num_AI_calls;\n double average_moves_per_turn = (double)stats.num_tested_moves / stats.num_searched_nodes;\n double average_nodes_per_call = (double)stats.num_searched_nodes / stats.num_AI_calls;\n\n // write to csv file to draw it\n fprintf(output_file, \"%s:%d:%g:%g:%g\\n\", (ALPHA_BETA) ? \"true\" : \"false\", PROF_MAX, average_moves_per_turn, average_elapsed_time, average_nodes_per_call);\n\n printf(\"****** STATISTIQUES ******\\n\");\n printf(\"WITH ALPHA BETA = %s\\n\", (ALPHA_BETA) ? 
\"yes\" : \"no\");\n printf(\"MAX DEPTH = %d\\n\", PROF_MAX);\n printf(\"NUM OF AI CALLS = %d function calls\\n\", stats.num_AI_calls);\n printf(\"AVERAGE MOVES PER TURN= %g nodes\\n\", average_moves_per_turn) ;\n printf(\"AVERAGE NODES PER CALL = %g nodes\\n\", average_nodes_per_call) ;\n printf(\"AVERAGE ELPASED TIME = %.2fms\\n\", average_elapsed_time);\n printf(\"**************************\\n\");\n}\n\nint f_win_check(int joueur, int* player_counter, int* opponent_counter,\n int voisinX, int old_dest, int new_dest)\n{\n\n int goal = (joueur == 1) ? 0 : 9; // goal line\n\n if(voisinX == goal) { // if we moved to the goal line the node is winning and therefore terminal\n return 1;\n }\n\n // update pawn counters only if the destination\n // coordinated had an opponent's pawn\n if(old_dest == -joueur) {\n // check the destination coordinate for who won the battle\n if(new_dest == joueur) { // if the player pawn won\n (*opponent_counter)--; // the opponent loses a pawn\n } else {\n (*player_counter)--; // otherwise we lost one\n }\n\n // check if either pawn counters are 0\n if((*player_counter) <= 0) {\n return -1; // if we lost our last pawn in a failed attack\n }\n\n if((*opponent_counter) <= 0) {\n return 1; // if we successfully finished all opponent pawns\n }\n }\n\n return 0; // neither won\n}\n\n\n// algo negamax sans alpha beta\nint f_negamax(Pion* plateau_courant, int profondeur, int joueur, int player_counter, int opponent_counter)\n{\n // update stats; adding a move to the total\n stats.num_tested_moves++;\n\n if(profondeur <= 0) {\n return f_eval(plateau_courant, joueur);\n }\n\n // update stats; adding a node search call to the total\n stats.num_searched_nodes ++;\n\n int maxval = -INFINI;\n\n int has_next = 1; // flag to signal a terminal node (no possible moves)\n\n Pion* plateau_suivant = (Pion *) malloc(NB_LIGNES * NB_COLONNES * sizeof (Pion));\n f_copie_plateau(plateau_courant, plateau_suivant);\n\n for (int i = 0; i < NB_LIGNES; i++) {\n for (int j = 0; j < NB_COLONNES; j++) {\n\n // move either the player or the opponent's pawns depending on\n // current color\n if(plateau_courant[i * NB_COLONNES + j].couleur == joueur)\n\n for(int x = -1; x <= 1; x++) {\n for(int y = -1; y <= 1; y++) {\n int voisinX = i + x, voisinY = j + y;\n\n // try moving the pawn\n if(f_bouge_piece(plateau_suivant, i, j, voisinX, voisinY, joueur) == 0) {\n // win check\n int new_dest = plateau_suivant[i * NB_COLONNES + j].couleur; // new destination pawn\n int old_dest = plateau_courant[i * NB_COLONNES + j].couleur; // old destination pawn\n\n int win_check = f_win_check(joueur, &player_counter, &opponent_counter, voisinX, old_dest, new_dest);\n\n // if any of the two won\n if(win_check) {\n return win_check * INFINI;\n }\n\n has_next = 0; // if there is at least one move we unset the flag\n\n\n int newval = -f_negamax(plateau_suivant, profondeur - 1, -joueur, player_counter, opponent_counter);\n\n // update the maximum value and a quick way to randomize which maximum is\n // chosen WARNING: not uniform\n int rand_step = 2;\n\n if((rand() % rand_step == 0) ? 
newval >= maxval : newval > maxval) {\n maxval = newval;\n\n // set the move\n if(profondeur == PROF_MAX) {\n fromX = i, fromY = j;\n toX = voisinX, toY = voisinY;\n }\n }\n\n // undo the move:\n // put the origin and destination cell pieces back\n // (in case there was an attack; ie: neighboring\n // cell had an enemy pawn)\n\n plateau_suivant[i * NB_COLONNES + j ] = plateau_courant[i * NB_COLONNES + j ];\n plateau_suivant[voisinX * NB_COLONNES + voisinY] = plateau_courant[voisinX * NB_COLONNES + voisinY];\n\n // f_bouge_piece(plateau_suivant, voisinX, voisinY, i, j, joueur);\n }\n\n }\n }\n }\n }\n\n\n free(plateau_suivant);\n\n // if the node is terminal return the evaluation of the current node\n if(!has_next) {\n return f_eval(plateau_courant, joueur);\n }\n\n return maxval;\n}\n\n// algo negamax avec alpha beta maximize toujours pour le joueur donné\nint f_negamax_ab(Pion* plateau_courant, int profondeur, int joueur, int alpha, int beta, int player_counter, int opponent_counter)\n{\n // update stats\n stats.num_tested_moves++;\n\n if(profondeur <= 0) {\n return f_eval(plateau_courant, joueur);\n }\n\n // update stats; adding a node search call to the total\n stats.num_searched_nodes ++;\n\n int maxval = -INFINI;\n\n int has_next = 1; // in case the node is terminal\n\n Pion* plateau_suivant = (Pion *) malloc(NB_LIGNES * NB_COLONNES * sizeof (Pion));\n f_copie_plateau(plateau_courant, plateau_suivant);\n\n for (int i = 0; i < NB_LIGNES; i++) {\n for (int j = 0; j < NB_COLONNES; j++) {\n\n // only move the player's pawns\n if(plateau_courant[i * NB_COLONNES + j].couleur == joueur) {\n\n for(int x = -1; x <= 1; x++) {\n for(int y = -1; y <= 1; y++) {\n int voisinX = i + x, voisinY = j + y;\n\n // try moving the pawn\n if(f_bouge_piece(plateau_suivant, i, j, voisinX, voisinY, joueur) == 0) {\n // win check\n int new_dest = plateau_suivant[i * NB_COLONNES + j].couleur; // new destination pawn\n int old_dest = plateau_courant[i * NB_COLONNES + j].couleur; // old destination pawn\n\n int win_check = f_win_check(joueur, &player_counter, &opponent_counter, voisinX, old_dest, new_dest);\n\n // if any of the two won\n if(win_check) {\n return win_check * 10000;\n }\n\n has_next = 0; // if theres at least one possible move the node is not terminal\n\n\n int newval = -f_negamax_ab(plateau_suivant, profondeur - 1, -joueur, -beta, -alpha, player_counter, opponent_counter); // note that the alpha and beta are reversed\n\n // update the maximum value and randomize which maximum is\n // chosen\n int rand_step = 5;\n\n if((rand() % rand_step == 0) ? 
newval >= maxval : newval > maxval) {\n maxval = newval;\n\n // set the move\n if(profondeur == PROF_MAX) {\n fromX = i, fromY = j ;\n toX = voisinX, toY = voisinY;\n }\n }\n\n // trim the alpha value\n if(alpha < maxval) {\n alpha = maxval;\n }\n\n // cut off if the [alpha, beta] range is empty\n if(alpha >= beta) {\n goto CLEAN_EXIT; // goto to exit all four loops\n }\n\n // undo the move:\n // put the origin and destination cell piece back\n // (in case there was an attack; ie: neighboring\n // cell had an enemy pawn)\n\n plateau_suivant[i * NB_COLONNES + j ] = plateau_courant[i * NB_COLONNES + j ];\n plateau_suivant[voisinX * NB_COLONNES + voisinY] = plateau_courant[voisinX * NB_COLONNES + voisinY];\n\n // f_bouge_piece(plateau_suivant, voisinX, voisinY, i, j, joueur);\n }\n\n }\n }\n\n }\n\n }\n }\n\n\n// to perform clean exits when cutting branches off\nCLEAN_EXIT:\n free(plateau_suivant);\n\n // if the node is terminal return the evaluation of the current node\n if(!has_next) {\n return f_eval(plateau_courant, joueur);\n }\n\n return maxval;\n}\n\n/**\n * Calcule et joue le meilleur cout\n * */\nvoid f_IA(int joueur)\n{\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n\n /* ******************************** WARNING **********************************\n * ******************** THIS IS NOT GOOD NEEDS TO CHANGE *********************\n * What i'm doing here is reverse the board and treat it as if it was the o's\n * turn. this is done because i couldn't figure out the problem with the x player\n * always losing and sacrificing its pawns this seems to be at least\n * a temporary fix, given i didn't have time to fix the real problem in\n * f_negamax_ab and f_negamax.\n * ************************************************************************ */\n/////////////////////\n Pion* reversedBoard = f_raz_plateau();\n\n if(joueur == -1) {\n f_copie_plateau(plateauDeJeu, reversedBoard);\n } else {\n for(int i = 0; i < NB_LIGNES; i++) {\n for(int j = 0; j < NB_COLONNES; j++) {\n reversedBoard[(NB_LIGNES - 1 - i)*NB_COLONNES + j].valeur = plateauDeJeu[i * NB_COLONNES + j].valeur;\n reversedBoard[(NB_LIGNES - 1 - i)*NB_COLONNES + j].couleur = -plateauDeJeu[i * NB_COLONNES + j].couleur;\n }\n }\n }\n\n int joueur2 = -1;\n////////////////////\n\n int eval; // returned value for minimax evaluation\n\n // benchmarking\n clock_t strt, end;\n strt = clock();\n\n int player_counter = f_nbPions(reversedBoard, joueur2);\n int opponent_counter = f_nbPions(reversedBoard, -joueur2);\n\n#if ALPHA_BETA==1\n eval = f_negamax_ab(reversedBoard, PROF_MAX, joueur2, -INFINI, INFINI, player_counter, opponent_counter);\n#else\n eval = f_negamax(reversedBoard, PROF_MAX, joueur2, player_counter, opponent_counter);\n#endif\n\n end = clock();\n\n //update stats\n stats.num_AI_calls ++;\n stats.total_elapsed_time += ((double) end - strt) / CLOCKS_PER_SEC;\n\n\n /* Here i reverse back the command depending on the current player to get\n * the real move decision, as said above this NEEDS to change */\n ///////////////////////////////////////////////\n // reverse back\n if(joueur == 1) {\n f_bouge_piece(plateauDeJeu, NB_LIGNES - 1 - fromX, fromY, NB_LIGNES - 1 - toX, toY, joueur);\n } else {\n f_bouge_piece(plateauDeJeu, fromX, fromY, toX, toY, joueur);\n }\n\n free(reversedBoard);\n ////////////////////////////////////////////\n\n printf(\"\\n IA move for %c with eval %d: %d%c%d%c\\n\", (joueur == 1) ? 
'x' : 'o', eval,\n fromX, f_convert_int2char(fromY),\n toX, f_convert_int2char(toY));\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n}\n\n\n/**\n * Demande le choix du joueur humain et calcule le coup demande\n * */\nvoid f_humain(int joueur)\n{\n char c1, c2;\n char buffer[32];\n int l1, l2;\n\n\n#ifdef DEBUG\n printf(\"dbg: entering %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n printf(\"joueur \");\n\n switch(joueur) {\n case -1:\n printf(\"o \");\n break;\n\n case 1:\n printf(\"x \");\n break;\n\n default:\n printf(\"inconnu \");\n }\n\n printf(\"joue:\\n\");\n\n while(1) {\n fgets(buffer, 32, stdin);\n\n if(sscanf(buffer, \"%c%i%c%i\\n\", &c1, &l1, &c2, &l2) == 4) {\n if(f_bouge_piece(plateauDeJeu, l1, f_convert_char2int(c1), l2, f_convert_char2int(c2), joueur) == 0) {\n break;\n }\n }\n\n fflush(stdin);\n printf(\"mauvais choix\\n\");\n }\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n}\n\nint main(int argv, char *argc[])\n{\n // random seed\n srand(time(0));\n\n int fin = 0, mode = 0, ret, joueur = 1;\n printf(\"1 humain vs IA\\n2 humain vs humain\\n3 IA vs IA\\n\");\n scanf(\"%d\", &mode);\n\n plateauDeJeu = f_init_plateau();\n\n while (!fin) {\n f_affiche_plateau(plateauDeJeu);\n\n if(mode == 1) {\n if(joueur > 0) {\n f_humain(joueur);\n } else {\n f_IA(joueur);\n }\n } else if(mode == 2) {\n f_humain(joueur);\n } else {\n f_IA(joueur);\n }\n\n if ((ret = f_gagnant()) != 0) {\n switch (ret) {\n case 1:\n f_affiche_plateau(plateauDeJeu);\n printf(\"joueur x gagne!\\n\");\n fin = 1;\n break;\n\n case -1:\n f_affiche_plateau(plateauDeJeu);\n printf(\"joueur o gagne!\\n\");\n fin = 1;\n break;\n }\n }\n\n joueur = -joueur;\n }\n\n // write statistics\n f_affiche_stats();\n\n#ifdef DEBUG\n printf(\"dbg: exiting %s %d\\n\", __FUNCTION__, __LINE__);\n#endif\n\n return 0;\n}\n" }, { "alpha_fraction": 0.6312848925590515, "alphanum_fraction": 0.6340782046318054, "avg_line_length": 15.272727012634277, "blob_id": "2c6b9d2459e1db5536be6ebb9a69f7827c121e26", "content_id": "2dbac5c41ee18e8787d34fd27d0f67695e766bc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 358, "license_type": "no_license", "max_line_length": 48, "num_lines": 22, "path": "/IA/1TP/Makefile", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "CC=gcc\nDEBUGGER=gdb\nPARAMS=-Wall\nBIN=wargame\nDOC=doc\n\nall: wargame.c\n\t$(CC) $(PARAMS) -o $(BIN) $^\n\ndebug: wargame.c\n\t$(CC) $(PARAMS) -g -o $(BIN) $^\n\t$(DEBUGGER) ./$(BIN)\n\ndoc: $(DOC)/rapport.tex genplots\n\tcd $(DOC) && pdflatex rapport.tex # compile pdf\n\ngenplots: plot.py output.csv\n\tpython3 $^ # generate the plots\n\n.PHONY: clean\nclean:\n\trm -f *.o $(BIN)\n" }, { "alpha_fraction": 0.5662100315093994, "alphanum_fraction": 0.6027397513389587, "avg_line_length": 15.615385055541992, "blob_id": "3ef9dc506eb3bd49162d4e7abd60832c6f2a35b2", "content_id": "2978217466ada934c65ee44a6f8c8e3e1bb38a9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 219, "license_type": "no_license", "max_line_length": 61, "num_lines": 13, "path": "/TSI/TI_M1_2020/NRC/makefile", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "FLAGS=-Wall -g \n\nall: tp1\n\ntp1.o: tp1.c \n\tgcc $(FLAGS) -c tp1.c nrio.c nralloc.c nrarith.c -lm -w \n\ntp1: tp1.o \n\tgcc $(FLAGS) -o tp1 tp1.o nrio.c nralloc.c nrarith.c -lm -w \n\n.PHONY: clean all\nclean: \n\trm -f -rf *.o\n\n\n\n" }, 
{ "alpha_fraction": 0.3485254645347595, "alphanum_fraction": 0.42493298649787903, "avg_line_length": 26.629629135131836, "blob_id": "b131720a383e630fa0661e2882cc26c933e95fd8", "content_id": "595c1ccebdd063e184f07ddc2e4a1d1ad2530245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 58, "num_lines": 27, "path": "/IA/2TP/doc/plot.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as pp\n\nsensor = {\"close\": {\"x\":[300 , 350, 1023], \"y\":[0, 1, 1]},\n \"half\": {\"x\":[150 , 250, 350 ], \"y\":[0, 1, 0]},\n \"far\": {\"x\":[0 , 100, 250 ], \"y\":[1, 1, 0]}}\n\nmotor = {\"reverse\":{\"x\":[-5 , -4 , -3 ], \"y\":[1, 1, 0]},\n \"stop\" :{\"x\":[-0.3, 0 , 0.3 ], \"y\":[0, 1, 0]},\n \"high\" :{\"x\":[3 , 4 , 5 ], \"y\":[0, 1, 1]}}\n\ndef plt_fuzzy(f, n):\n fig, ax = pp.subplots()\n\n xvals = []\n for name in f:\n s = f[name]\n xvals += s[\"x\"]\n pp.plot(s[\"x\"], s[\"y\"], label=name)\n \n ax.xaxis.set_ticks(list(set(xvals)))\n \n pp.xticks(rotation=90)\n pp.legend(loc=\"best\")\n pp.savefig(n + \".pdf\")\n\nplt_fuzzy(sensor, \"sensor\")\nplt_fuzzy(motor, \"motor\")\n" }, { "alpha_fraction": 0.6029723882675171, "alphanum_fraction": 0.6029723882675171, "avg_line_length": 13.71875, "blob_id": "d6ee70787d7bcabac6ecdc8b0e919d5dfdd8a259", "content_id": "00f90642cf3222d10ae09d2644c534d0660ac1b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 471, "license_type": "no_license", "max_line_length": 42, "num_lines": 32, "path": "/IA/4TP/TP4_IA_Kohonen_2018_2019_JA_NB/tp4_base_code/Makefile", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "CC=gcc\nLIBS=-lm -lglut -lGL -lGLU\nFLAGS=-g -Wall $(pkg-config --cflags glu)\nBIN=main\n\nall: $(BIN)\n\n$(BIN): base_opengl.o ppm.o kohonen.o\n\t$(CC) $(FLAGS) $^ -o $(BIN) $(LIBS)\n\nppm.o: ppm.c ppm.h\n\t$(CC) $(FLAGS) -c $<\n\nbase_opengl.o: base_opengl.c base_opengl.h\n\t$(CC) $(FLAGS) -c $<\n\nkohonen.o: kohonen.c kohonen.h\n\t$(CC) $(FLAGS) -c $<\n\nbase_opengl.h:\n\ttouch base_opengl.h\n\nppm.h: \n\ttouch ppm.h\n\nkohonen.h:\n\ttouch kohonen.h\n\nclean :\n\trm -f $(BIN) *.o\n\n.PHONY: clean all\n" }, { "alpha_fraction": 0.712382435798645, "alphanum_fraction": 0.7288401126861572, "avg_line_length": 26.148935317993164, "blob_id": "f57c71e6bf6c66910ed4e35313e6425af91cd62e", "content_id": "c5845f2b169900e95c92fe9b94c9508656d0d069", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 1276, "license_type": "no_license", "max_line_length": 67, "num_lines": 47, "path": "/BDAplus/BDAplus/create_r_db.sql", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "CREATE TABLE CountryCode (\n NOC VARCHAR(3) NOT NULL PRIMARY KEY,\n country_name VARCHAR(64) NOT NULL ,\n ISOCode VARCHAR(2) NOT NULL\n);\n\nCREATE TABLE City (\n city_id INT NOT NULL UNIQUE PRIMARY KEY,\n city_name VARCHAR(64) NOT NULL,\n NOC VARCHAR(3) NOT NULL REFERENCES CountryCode(NOC)\n);\n\nCREATE TABLE Discipline (\n\tdiscipline_id INT NOT NULL UNIQUE PRIMARY KEY,\n sport_name VARCHAR(64),\n discipline_name VARCHAR(64)\n);\n\nCREATE TABLE Event(\n event_id INT NOT NULL UNIQUE PRIMARY KEY,\n event_name VARCHAR(64),\n event_gender VARCHAR(1),\n edition INT,\n city_id INT NOT NULL REFERENCES City(city_id)\n);\n\n-- Many to Many for Event and discipline\nCREATE TABLE EventActivity (\n\tevent_activity_id INT NOT NULL UNIQUE 
PRIMARY KEY,\n discipline_id INT NOT NULL REFERENCES Discipline(discipline_id),\n event_id INT NOT NULL REFERENCES Event(event_id)\n);\n\n\nCREATE TABLE Athlete(\n athlete_id INT NOT NULL UNIQUE PRIMARY KEY,\n athlete_name VARCHAR(64) NOT NULL,\n athlete_gender VARCHAR(10)\n);\n\nCREATE TABLE Medal (\n medal_id INT NOT NULL UNIQUE PRIMARY KEY,\n athlete_id INT NOT NULL REFERENCES Athlete(athlete_id),\n event_id INT NOT NULL REFERENCES Event(event_id),\n represent_NOC VARCHAR(3) REFERENCES CountryCode(NOC),\n medal_type VARCHAR(10)\n);\n" }, { "alpha_fraction": 0.5505097508430481, "alphanum_fraction": 0.5792400240898132, "avg_line_length": 18.14285659790039, "blob_id": "cbe638bb1d5d6edc07c8d127d15607fe34e96500", "content_id": "5216ac43f1724dba6464f015b8840c77d5d6b9f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1079, "license_type": "no_license", "max_line_length": 82, "num_lines": 56, "path": "/PSSR/3TD/miller_rabin.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "\nfrom random import *\n\ndef miller_rabin(n, k=10):\n\t\"\"\"\n\tFonction implementant le test de primalite de Miller Rabin.\n\tn est le nombre teste\n\tk est le nombre d'iterations de test effectuees (fixe par defaut a 10).\n\t\"\"\"\n\tif n == 2:\n\t\treturn True\n\tif not n & 1:\n\t\treturn False\n\n\tdef temoin(a, s, d, n):\n\t\tx = pow(a, d, n)\n\t\tif x == 1:\n\t\t\treturn False\n\t\tfor i in range(s - 1):\n\t\t\tif x == n - 1:\n\t\t\t\treturn False\n\t\t\tx = pow(x, 2, n)\n\t\treturn x != n - 1\n\n\ts = 0\n\td = n - 1\n\n\twhile d % 2 == 0:\n\t\td >>= 1\n\t\ts += 1\n\n\tfor i in range(k):\n\t\ta = randrange(2, n - 1)\n\t\tif temoin(a, s, d, n):\n\t\t\treturn False\n\treturn True\n\n\n\ndef getPrime(bits):\n\t\"\"\"\n\tFonction qui retourne un nombre premier du nombre de bits choisi\n\t\"\"\"\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p\n\t\t\t\n\nif __name__ == \"__main__\":\n bits = 1000\n N = 10\n k = 100\n nums = [getrandbits(bits) for _ in range(N)]\n for num in nums:\n print(\"prime \" if miller_rabin(num, k) else \"not prime \", num)\n\n \n" }, { "alpha_fraction": 0.7831325531005859, "alphanum_fraction": 0.7951807379722595, "avg_line_length": 40.5, "blob_id": "46a008a8e54eb1dd4d27920a643139c2974f4acd", "content_id": "fded9fddd85a578c4208e699263c637e22d3b6e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 76, "num_lines": 2, "path": "/IA/2TP/TP2_IA/README.md", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "Pour compiler le robot fait un make dans le répertoire SIM2 puis lancer avec\n./sim\n" }, { "alpha_fraction": 0.36752137541770935, "alphanum_fraction": 0.39316239953041077, "avg_line_length": 22.399999618530273, "blob_id": "a22a8c941c16c2c17cdd956029723ca585b79282", "content_id": "20c7b2159de483737a8d71eabb9464a29bad0d01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/PSSR/5TD/viterbi.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "def viterbi(n, T, M, X0, A):\n T = [[]]\n pred = [[]]\n for i in range(n):\n T[0].append(X0[i] * A[i][])\n" }, { "alpha_fraction": 0.6453313231468201, "alphanum_fraction": 
0.6521084308624268, "avg_line_length": 30.619047164916992, "blob_id": "9642200a9a96ac4063596fcb5cb5d951fb00025e", "content_id": "b0f574468f9be4211dd5127752282c7b7df7d14f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 88, "num_lines": 42, "path": "/IA/1TP/plot.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- \nimport matplotlib.pyplot as pp\nimport pandas as pd\nimport numpy as np\n\ncols = [\"with_ab\", \"depth\",\"avg_moves\", \"avg_time\", \"avg_nodes\"]\ndata = pd.read_csv(\"output.csv\", delimiter=\":\", names=cols)\n\nwith_ab = data.query(\"with_ab\")\\\n .groupby('depth', as_index=False)\\\n .mean()\\\n .sort_values(by='depth')\n\nwithout_ab = data.query(\"not with_ab\")\\\n .groupby('depth', as_index=False)\\\n .mean()\\\n .sort_values(by='depth')\n\npp.title(\"Comparaison entre negamax avec et sans alpha beta\")\npp.xlabel(\"Profondeur\")\npp.ylabel(\"Nombre des noueds traités (Knodes)\")\n\npp.plot(with_ab['depth'], with_ab['avg_nodes'].div(1000), label='avec alpha/beta')\npp.plot(without_ab['depth'], without_ab['avg_nodes'].div(1000), label='sans alpha/beta')\npp.legend(loc='best')\n#pp.show()\npp.savefig(\"doc/comp_nodes.pdf\")\nprint('generated doc/comp_nodes.pdf')\n\npp.cla()\npp.title(\"Comparaison entre negamax avec et sans alpha beta\")\npp.xlabel(\"Profondeur\")\npp.ylabel(\"Temps d'execution (ms)\")\n\npp.plot(with_ab['depth'], with_ab['avg_time'], label='avec alpha/beta')\npp.plot(without_ab['depth'], without_ab['avg_time'], label='sans alpha/beta')\n\npp.legend(loc='best')\n#pp.show()\npp.savefig(\"doc/comp_times.pdf\")\nprint('generated doc/comp_times.pdf')\n" }, { "alpha_fraction": 0.6134157180786133, "alphanum_fraction": 0.6213592290878296, "avg_line_length": 26.634145736694336, "blob_id": "8415a61eadf2ccd6faab20155f9e19b550365860", "content_id": "320d8807080bd01c0332cdace8047f10207fe7f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1133, "license_type": "no_license", "max_line_length": 127, "num_lines": 41, "path": "/IE/TP3/img_info.cpp", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <ctime>\n#include <iostream>\n#include <raspicam/raspicam_cv.h>\n\nusing namespace std;\n\nint main(int argc, char **argv)\n{\n\ttime_t timer_begin, timer_end;\n\traspicam::RaspiCam_Cv Camera;\n\tcv::Mat image;\n\tint nCount = 100;\n\t// set cam params\n\tCamera.set(CV_CAP_PROP_FORMAT, CV_8UC1);\n\t// Open camera\n\tcout << \"Opening camera...\" << endl;\n\tif(!Camera.open()) {\n\t\tcerr << \"Error opening the camera\" << endl;\n\t\treturn -1;\n\t}\n\n\t// Start capture\n\tcout << \"Capturing \" << nCount << \" frames ....\" << endl;\n\ttime(&timer_begin);\n\tfor(int i = 0; i < nCount; i++) {\n\t\tCamera.grab();\n\t\tCamera.retrieve(image);\n\t\tif(i % 5 == 0) cout << \"\\r captured \"<<i<<\" images\"<<std::flush;\n\t}\n\n\tcout<<\"stop camera...\"<<endl;\n\tCamera.release();\n\t//show time statistics\n\ttime(&timer_end);\n\n\tdouble secondsElapsed = difftime(timer_end, timer_begin);\n\tcout << secondsElapsed << \" seconds for \" << nCount << \" frames: FPS=\" << (float) ((float) (nCount) / secondsElapsed) << endl;\n\tcv::imwrite(\"raspicam_cv_image.jpg\", image);\n\tcout << \"Image saved at raspicam_cv_image.jpg\"<<endl;\n\tcout << \"Image size (\" << image.rows << \", \" << image.cols << \")\" << endl;\n}\n" }, { 
"alpha_fraction": 0.7269841432571411, "alphanum_fraction": 0.7269841432571411, "avg_line_length": 25.25, "blob_id": "81d794c2005a3c5aa648c25eb51b58e1c379251c", "content_id": "bc534958e71c01c330f8b55b0a50f91a26302410", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 315, "license_type": "no_license", "max_line_length": 54, "num_lines": 12, "path": "/BDAplus/BDAplus/destroy_graph_db.cql", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "drop index on :Country(name);\ndrop index on :City(name);\ndrop index on :Sport(name);\ndrop index on :Discipline(name);\ndrop index on :Athlete(name);\ndrop index on :Event(name);\ndrop index on :Year(name);\ndrop index on :Gender(name);\ndrop constraint on (c:Country) assert c.noc is unique;\n\nMATCH (n)\nDETACH DELETE n;\n" }, { "alpha_fraction": 0.630771815776825, "alphanum_fraction": 0.6339737176895142, "avg_line_length": 40.78873062133789, "blob_id": "3340ba26b08944ccf513553927fc654b66960891", "content_id": "c81b1f2c0a67581c17418dc1b311c1207cac734c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5939, "license_type": "no_license", "max_line_length": 108, "num_lines": 142, "path": "/BDAplus/insert_r_db.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\nimport psycopg2\nimport sys\nimport pprint\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import text as sa_text\nfrom io import StringIO\nimport csv\n\n# To accelerate the insertion using pandas's to_sql\n# https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-sql-method\ndef psql_insert_copy(table, conn, keys, data_iter):\n # gets a DBAPI connection that can provide a cursor\n dbapi_conn = conn.connection\n with dbapi_conn.cursor() as cur:\n s_buf = StringIO()\n writer = csv.writer(s_buf)\n writer.writerows(data_iter)\n s_buf.seek(0)\n\n columns = ', '.join('\"{}\"'.format(k) for k in keys)\n if table.schema:\n table_name = '{}.{}'.format(table.schema, table.name)\n else:\n table_name = table.name\n\n sql = 'COPY {} ({}) FROM STDIN WITH CSV'.format(\n table_name, columns)\n cur.copy_expert(sql=sql, file=s_buf)\n\ndef main():\n db_user = 'postgres' # username\n db_pass = '123456' # password\n db_host = '127.0.0.1' # hostname\n db_name = 'olympics' # Database name\n \n # Constructing the connection string\n conn_string = f'postgresql+psycopg2://{db_user}:{db_pass}@{db_host}/{db_name}'\n print(f\"Connecting to database\\n\\t-> {conn_string}\")\n # Create the db engine\n engine = create_engine(conn_string, pool_recycle=3600)\n \n\n ## utility lambda functions\n lowercase = lambda a: [e.lower() for e in a] # tranform array of strings to lowercase\n\n\n ### READING THE CSV FILES AND RESHAPING\n\n ## CITIES.CSV\n cities = pd.read_csv('olympics_data/cities.csv', header=0)\n cities = cities.rename(columns={'City': 'city_name'}) # changed to conform to db\n del cities['Country'] # conforming to db\n cities.columns = lowercase(cities.columns)\n cities['city_id'] = cities.index\n\n \n\n ## IOCCOUNTRYCODES.CSV\n # Here the na_filter=False is necessary since the country code for namibia\n # is NA which is interpreted as NaN by pandas\n ioc = pd.read_csv('olympics_data/ioccountrycodes.csv', header=0, na_filter=False)\n ioc = ioc.rename(columns={'Country': 'Country_name'})\n ioc.columns = lowercase(ioc.columns)\n\n print(ioc)\n\n\n # SPORTSTAXONOMY.CSV\n sports = 
pd.read_csv('olympics_data/sportstaxonomy.csv', header=0)\n sports = sports.rename(columns={'Sport': 'sport_name', 'Discipline': \"discipline_name\"}) \n sports['discipline_id'] = sports.index\n\n\n\n ## MEDALLISTS.CSV\n # For some reason the first two lines of the medallist csv are empty\n # (ignoring the comments)\n medal = pd.read_csv('olympics_data/medallists.csv', header=2, comment='\"')\n\n # Extracting Athletes\n athlete = medal[['Athlete','Gender']].drop_duplicates().reset_index(drop=True)\n athlete = athlete.rename(columns={'Athlete': 'athlete_name', 'Gender': 'athlete_gender'})\n athlete['athlete_id'] = athlete.index\n\n # Extracting events\n event = medal[['City', 'Event', 'Event_gender', 'Discipline', 'Edition']]\n event = event.rename(columns={'City':'city_name', 'Discipline': 'discipline_name'})\n event = event.merge(cities, on='city_name').merge(sports, on='discipline_name')\n event = event.drop(['city_name', 'discipline_name', 'sport_name', 'noc'], axis=1)\n event = event.reset_index(drop=True)\n event = event.rename(columns={'Event':'event_name', 'Event_gender':'event_gender', 'Edition':'edition'})\n event['event_id'] = event.index\n\n # Extracting medals\n medals = medal.rename(columns={'Event_gender': 'event_gender', \n 'NOC': 'noc',\n 'Event':'event_name',\n 'Edition':'edition',\n 'City':'city_name',\n 'Discipline': 'discipline_name',\n 'Athlete': 'athlete_name',\n 'Gender': 'athlete_gender'})\n\n medals = medals.merge(athlete, on=['athlete_name', 'athlete_gender'])\n medals = medals.merge(sports, on='discipline_name')\n medals = medals.merge(cities, on=['city_name', 'noc'])\n medals = medals.merge(event, on=['event_gender', 'event_name', 'edition', 'discipline_id', 'city_id'])\n medals = medals[['athlete_id', 'event_id', 'noc', 'Medal']]\n medals = medals.rename(columns={'noc':'represent_noc', 'Medal':'medal_type'})\n \n \n \n # dropping the existing data and inserting new the new ones\n for table in [\"City\", \"CountryCode\", \"Discipline\", \"Medal\", \"Athlete\", \"Event\", \"Medal\"]:\n engine.execute(sa_text(f\"TRUNCATE TABLE {table} CASCADE\").execution_options(autocommit=True))\n\n ## Inserting the data frames to postgresql\n \n # inserting cities\n cities.to_sql('City', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n # inserting country codes\n ioc.to_sql('CountryCode', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n # inserting sports\n sports.to_sql('Discipline', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n # inserting athletes\n athlete.to_sql('Athlete', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n # inserting event\n event.to_sql('Event', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n # inserting medal\n medals.to_sql('Medal', con=engine, if_exists='replace', index=False, method=psql_insert_copy)\n\n #print(pd.read_sql(sql='SELECT * FROM City', con=engine))\n #print(pd.read_sql(sql='SELECT * FROM CountryCode', con=engine))\n #print(pd.read_sql(sql='SELECT * FROM Discipline', con=engine))\n #print(pd.read_sql(sql='SELECT * FROM Athlete', con=engine))\n #print(pd.read_sql(sql='SELECT * FROM Event', con=engine))\n #print(pd.read_sql(sql='SELECT * FROM Medal', con=engine))\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.8267326951026917, "alphanum_fraction": 0.8267326951026917, "avg_line_length": 27.85714340209961, "blob_id": "bb73f6e39476ef1d4b13378bbd88bd97af81e207", "content_id": 
"f5a416d6938ff90b77969e16e48a8c8324d60409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 202, "license_type": "no_license", "max_line_length": 33, "num_lines": 7, "path": "/BDAplus/BDAplus/drop_r_db.sql", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "DROP TABLE City CASCADE;\nDROP TABLE CountryCode CASCADE;\nDROP TABLE Discipline CASCADE;\nDROP TABLE EventActivity CASCADE;\nDROP TABLE Event CASCADE;\nDROP TABLE Athlete CASCADE;\nDROP TABLE Medal CASCADE;\n" }, { "alpha_fraction": 0.5378151535987854, "alphanum_fraction": 0.5798319578170776, "avg_line_length": 17.30769157409668, "blob_id": "7038757d5ec8e7c4ed66af3f306dfbf899053fd4", "content_id": "0c6bbae089979035e975c6ba18b1e4bace6fad29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 51, "num_lines": 13, "path": "/PSSR/3TD/BoiteNoire.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "import math\n\ndef fonction1(x) :\n return (math.log(1+x*x)+math.log(2)+math.pi/2)\t\n\ndef fonction2(x) :\n return math.asin((2*x)/(1+x*x))\n\ndef fonction3(x, y) :\n return (x*y + y*y +1)\n\ndef fonction4(x, y, z) :\n return x*x*y*y*y*z\n" }, { "alpha_fraction": 0.5086419582366943, "alphanum_fraction": 0.5506172776222229, "avg_line_length": 26.89655113220215, "blob_id": "b75e930eeca19bc95adb7d3e114513425c446b63", "content_id": "66d0418ceb0c8435e653a5f50beefd46109ffc3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 810, "license_type": "no_license", "max_line_length": 64, "num_lines": 29, "path": "/PSSR/3TD/integral.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "from BoiteNoire import *\nfrom random import uniform\nimport matplotlib.pyplot as pp\nimport math\n\n# integration pour n dimensions\ndef integral(fun, vals, n):\n surface = reduce(lambda x,y:x*y, [b-a for a,b in vals], 1.0)\n s = 0.0\n dim = len(vals)\n\n for _ in range(n):\n s += fun(*[uniform(a, b) for a,b in vals]) / n\n \n return surface * s\n\nif __name__ == \"__main__\":\n N = 100\n print(integral(fonction1, [[0, 1]], N))\n print(integral(fonction2, [[0, math.sqrt(3)]], N))\n print(integral(fonction3, [[0, 1], [-1, 3]], N))\n print(integral(fonction4, [[3, 7], [-4 ,-1], [2, 3]], N))\n \n N = 100 \n X_axis = [float(i)/N for i in range(N+1)]\n print(X_axis)\n f = lambda x: x *(1-x) * math.sin(200 * x * (1-x))\n pp.plot(X_axis, [f(x) for x in X_axis])\n pp.show() \n" }, { "alpha_fraction": 0.5338345766067505, "alphanum_fraction": 0.5551378726959229, "avg_line_length": 23.90625, "blob_id": "dcb8af9341ca6b22365192c5466190a38b79b1a7", "content_id": "a3bc2ff8f78869f95370dea909c1b51dd38c3b18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 799, "license_type": "no_license", "max_line_length": 81, "num_lines": 32, "path": "/PSSR/3TD/coupon_collector.py", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "from random import randint\nimport matplotlib.pyplot as pp\nimport math\n\ndef simule_CCP(n):\n tirages = [False for _ in range(n)]\n somme_tirages = 0\n run_time = 1\n while somme_tirages < n:\n tirage = randint(0, n-1)\n if not tirages[tirage]:\n tirages[tirage] = True\n somme_tirages += 1\n\n run_time +=1\n \n return run_time\n\ndef moyenne_CCP(n, k):\n return sum([simule_CCP(n) for _ in range(k)])/k\n\n\nif __name__ == \"__main__\":\n # moyenne 
de k CCP\n N = 20 \n k_n = lambda n: int((100/5.7) * math.pi * n**2)\nœ \n X_axis = range(1, N+1)\n pp.plot(X_axis, [moyenne_CCP(n, k_n(n)) for n in X_axis], label=\"moyenn_CCP\")\n pp.plot(X_axis, [n*math.log(n, 2) for n in X_axis], label=\"nlog(n)\")\n pp.legend(loc=\"best\")\n pp.show()\n\n" }, { "alpha_fraction": 0.5213522911071777, "alphanum_fraction": 0.5604982376098633, "avg_line_length": 16.5625, "blob_id": "74518a619400cf590db0ddefaa475f825d9a4cc9", "content_id": "084eac9796f01075e0447cb16d44fccf04d2a982", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 562, "license_type": "no_license", "max_line_length": 41, "num_lines": 32, "path": "/IE/TP/TP3/pixel.cpp", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <opencv2/opencv.hpp>\nusing namespace cv;\n\nint main() {\n\tVideoCapture cap(0);\n\tif(!cap.isOpened()) {\n\t\tprintf(\"Error !!\");\n\t\treturn -1;\n\t}\n\t\n\tMat image, frame;\n\tVec3b color;\n\tnamedWindow(\"rose effect\", 0);\n\tfor(;;) {\n\t\tcap >> frame;\n\t\timage = frame;\n\t\tfor(int y = 0; y < image.rows; y++) {\n\t\t\tfor(int x = 0; x < image.cols; x++) {\n\t\t\t\tcolor = image.at<Vec3b>(Point(x, y));\n\n\t\t\t\tcolor.val[0] = 255;\n\t\t\t\tcolor.val[2] = 255;\n\n\t\t\t\timage.at<Vec3b>(Point(x, y)) = color;\n\t\t\t}\n\t\t}\n\t\timshow(\"rose effect\", image);\n\t\tif(waitKey(33) == 27) break;\n\t}\n\n\treturn 0;\n}\n" }, { "alpha_fraction": 0.4745672047138214, "alphanum_fraction": 0.4907708168029785, "avg_line_length": 26.54421043395996, "blob_id": "935180079a1cf0a3c4df2bdeb589ace961706369", "content_id": "f644cc370eeb8dddad7a55517499e2198796760f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 26167, "license_type": "no_license", "max_line_length": 141, "num_lines": 950, "path": "/TSI/TI_M1_2020/NRC/tp1.c", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "#include <stdio.h>\n#include <stdlib.h>\n#include <math.h>\n#include <dirent.h>\n#include <limits.h>\n#include <fcntl.h>\n\n#include \"def.h\"\n#include \"nrio.h\"\n#include \"nrarith.h\"\n#include \"nralloc.h\"\n\n\n/* 2D convolution with a mask\n * Retuns an imatrix to avoid overflows\n */\nint** conv2(byte** f, long nrl, long nrh, long ncl, long nch,\n float** mask, long maskw, long maskh)\n{\n int** out = imatrix(nrl, nrh, ncl, nch);\n\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n double acc = 0.0;\n int n = 0;\n\n for (int u = 0; u < maskw; u++) {\n for (int v = 0; v < maskh; v++) {\n int nx = x + u - ((int) (maskw / 2));\n int ny = y + v - ((int) (maskh / 2));\n\n if(nx >= nrl && nx < nrh && ny >= ncl && ny < nch) {\n acc += f[nx][ny] * mask[u][v];\n n++;\n }\n\n }\n }\n\n out[x][y] = acc / n;\n }\n }\n\n return out;\n}\n\n/* map a function to every element of an imatrix\n */\nvoid map(int** m, long nrl, long nrh, long ncl, long nch, int (*func)(int))\n{\n\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n m[x][y] = func(m[x][y]);\n }\n }\n}\n\n/* Add two imatrices\n */\nint** add(int **m1, int **m2, long nrl, long nrh, long ncl, long nch)\n{\n int** out = imatrix(nrl, nrh, ncl, nch);\n\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y] = m1[x][y] + m2[x][y];\n }\n }\n\n return out;\n}\n\n/* Convert an imatrix to a bmatrix\n * PS: casting to smaller type can be lossy\n */\nbyte** convert_imatrix_bmatrix(int** m, long nrl, long nrh, long ncl, long nch)\n{\n\n byte** out = 
bmatrix(nrl, nrh, ncl, nch);\n\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y] = (byte) m[x][y];\n }\n }\n\n free_imatrix(m, nrl, nrh, ncl, nch);\n\n return out;\n}\n\nint pow2(int x)\n{\n return x * x;\n}\n\nint sqrt2(int x)\n{\n return (int) sqrt((double) x);\n}\n\nint ceil2(int x)\n{\n return 255 * (x > 5) ;\n}\n\nbyte** bgradient(byte** I, long nrl, long nrh, long ncl, long nch)\n{\n float maskd1[3][3] = {{-1, -2, -1},\n {0, 0, 0},\n {1, 2, 1}\n };\n float maskd2[3][3] = {{-1, 0, 1},\n {-2, 0, 2},\n {-1, 0, 1}\n };\n\n float** mask1 = malloc(sizeof(float*) * 3);\n float** mask2 = malloc(sizeof(float*) * 3);\n\n for (int i = 0; i < 3; i++) {\n mask1[i] = malloc(sizeof(float) * 3);\n mask2[i] = malloc(sizeof(float) * 3);\n\n for (int j = 0; j < 3; j++) {\n mask1[i][j] = maskd1[i][j];\n mask2[i][j] = maskd2[i][j];\n }\n }\n\n /* Calculate convolutions for the two masks */\n int** im1 = conv2(I, nrl, nrh, ncl, nch, mask1, 3, 3); // horizontal\n int** im2 = conv2(I, nrl, nrh, ncl, nch, mask2, 3, 3); // vertical\n\n /* Normalize */\n /* map square */\n map(im1, nrl, nrh, ncl, nch, pow2);\n map(im2, nrl, nrh, ncl, nch, pow2);\n\n /* add two matrices */\n int** sum = add(im1, im2, nrl, nrh, ncl, nch);\n /* map square root */\n map(sum, nrl, nrh, ncl, nch, sqrt2);\n\n /* map ceil */\n map(sum, nrl, nrh, ncl, nch, ceil2);\n\n /* Free the dynamically allocated masks */\n for (int i = 0; i < 3; i++) {\n free(mask1[i]);\n free(mask2[i]);\n }\n\n free(mask1);\n free(mask2);\n\n /* Free the convolution results*/\n free_imatrix(im1, nrl, nrh, ncl, nch);\n free_imatrix(im2, nrl, nrh, ncl, nch);\n\n /* convert to byte matrix and save (and frees the sum matrix) */\n return convert_imatrix_bmatrix(sum, nrl, nrh, ncl, nch);\n}\n\nrgb8** rgb8gradient(rgb8** m, long nrl, long nrh, long ncl, long nch)\n{\n byte **R = bmatrix(nrl, nrh, ncl, nch),\n **G = bmatrix(nrl, nrh, ncl, nch),\n **B = bmatrix(nrl, nrh, ncl, nch);\n\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n R[x][y] = m[x][y].r;\n G[x][y] = m[x][y].g;\n B[x][y] = m[x][y].b;\n }\n }\n\n byte** R_Grad = bgradient(R, nrl, nrh, ncl, nch);\n byte** G_Grad = bgradient(G, nrl, nrh, ncl, nch);\n byte** B_Grad = bgradient(B, nrl, nrh, ncl, nch);\n free_bmatrix(R, nrl, nrh, ncl, nch);\n free_bmatrix(G, nrl, nrh, ncl, nch);\n free_bmatrix(B, nrl, nrh, ncl, nch);\n\n\n rgb8** out = rgb8matrix(nrl, nrh, ncl, nch);\n \n // number of components that has to be over the threshold to be considered\n // ex R and G or G and B\n int threshold = 2;\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y].r = out[x][y].g = out[x][y].b = 255 * ((R_Grad[x][y] == 255) + (G_Grad[x][y] == 255) + (B_Grad[x][y] == 255) > threshold);\n }\n }\n\n free_bmatrix(R_Grad, nrl, nrh, ncl, nch);\n free_bmatrix(G_Grad, nrl, nrh, ncl, nch);\n free_bmatrix(B_Grad, nrl, nrh, ncl, nch);\n\n return out;\n}\n\nint min(int a, int b) {\n return (a < b)?a:b;\n}\nint max(int a, int b) {\n return (a > b)?a:b;\n}\n\nint** label(byte** I, long nrl, long nrh, long ncl, long nch, int* num_labels)\n{\n int** labels = imatrix0(nrl, nrh, ncl, nch);\n int init_size = 10; \n int cur_size = 10;\n int cur = 0;\n int* labelMap = malloc(sizeof(int) * init_size);\n \n // init labels and map\n for (int i = 0; i < init_size; i++) {\n labelMap[i] = i;\n }\n \n for (int y = nrl; y < nrh; y++) {\n for (int x = ncl; x < nch; x++) {\n if(I[y][x] != 0) {\n int C = labels[y][x];\n\n int A = (x-1 >= 0)? 
labels[y][x-1]: 0, PA = (x-1 >= 0)? I[y][x-1]: 0;\n int B = (y-1 >= 0)? labels[y-1][x]: 0, PB = (y-1 >= 0)? I[y-1][x]: 0;\n \n if(PA != 0 && PB == 0) {\n labels[y][x] = A;\n } else if(PB != 0 && PA == 0) {\n labels[y][x] = labelMap[B];\n } else if(PA == 0 && PB == 0) {\n cur ++;\n \n // if we need to realloc the labelMap\n if(cur >= cur_size) {\n int new_size = cur_size * 2;\n labelMap = realloc(labelMap, sizeof(int) * new_size);\n for(int i = cur_size; i < new_size; i++) {\n labelMap[i] = i;\n }\n cur_size = new_size;\n }\n \n labels[y][x] = cur;\n } else if(PA != 0 && PB != 0 && A == B) {\n labels[y][x] = A;\n } else if(PA != 0 && PB != 0 && A != B) {\n labels[y][x] = min(labelMap[B], A);\n C = labels[y][x];\n labelMap[C] = C;\n labelMap[A] = C;\n labelMap[max(labelMap[B], A)] = C;\n }\n }\n } \n }\n\n // update label map\n int label = 0;\n for (int i = 1; i <cur; i++) {\n if(labelMap[i] == i) {\n label ++;\n labelMap[i] = label;\n } else {\n labelMap[i] = labelMap[labelMap[i]];\n }\n }\n *num_labels = label;\n\n for (int y = nrl; y < nrh; y++) {\n for (int x = ncl; x < nch; x++) {\n labels[y][x] = labelMap[labels[y][x]]; \n }\n }\n\n printf(\"\\n\");\n free(labelMap);\n return labels; \n}\n\nrgb8** colorLabels(int** labels, long nrl, long nrh, long ncl, long nch, int num_labels) {\n rgb8** res = rgb8matrix(nrl, nrh, ncl, nch);\n rgb8* colorMap = rgb8vector(1, num_labels);\n for (int i = 0; i < num_labels; i++) {\n colorMap[i].r = rand() % 256;\n colorMap[i].g = rand() % 256;\n colorMap[i].b = rand() % 256;\n }\n\n for (int i = nrl; i < nrh; i++) {\n for (int j = ncl; j < nch; j++) {\n if(labels[i][j] != 0)\n res[i][j] = colorMap[labels[i][j]];\n else {\n res[i][j].r = 0; res[i][j].g = 0; res[i][j].b = 0;\n }\n }\n }\n return res;\n}\n\nbyte** getLabel(int** labels, int nrl, int nrh, int ncl, int nch, int label)\n{\n byte** res = bmatrix(nrl, nrh, ncl, nch);\n for (int x = ncl; x < nch; x++) {\n for (int y = nrl; y < nrh; y++) {\n res[y][x] = (label == labels[y][x])? 
255: 0;\n }\n }\n return res;\n}\n\nbyte** erosion(byte** f, long nrl, long nrh, long ncl, long nch, float** mask, long maskw, long maskh, int repeats) {\n float mask_sum = 0;\n\n for (int u = 0; u < maskw; u++) {\n for (int v = 0; v < maskh; v++) {\n mask_sum += mask[u][v];\n }\n }\n byte ** out = bmatrix(nrl, nrh, ncl, nch);\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y] = f[x][y];\n }\n }\n\n byte ** out2 = bmatrix(nrl, nrh, ncl, nch);\n\n for (int i = 0; i < repeats; i++) {\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n float acc = 0.0;\n\n for (int u = 0; u < maskw; u++) {\n for (int v = 0; v < maskh; v++) {\n int nx = x + u - ((int) (maskw / 2));\n int ny = y + v - ((int) (maskh / 2));\n\n if(nx >= nrl && nx < nrh && ny >= ncl && ny < nch) {\n acc += (out[nx][ny] / 255) * mask[u][v];\n }\n\n }\n }\n\n out2[x][y] = (acc == mask_sum) * 255;\n }\n }\n\n byte** temp = out2;\n out2 = out;\n out = temp;\n }\n \n free_bmatrix(out2, nrl, nrh, ncl, nch);\n \n return out;\n}\n\nbyte** dilation(byte** f, long nrl, long nrh, long ncl, long nch, float** mask, long maskw, long maskh, int repeats) {\n byte ** out = bmatrix(nrl, nrh, ncl, nch);\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y] = f[x][y];\n }\n }\n\n byte ** out2 = bmatrix(nrl, nrh, ncl, nch);\n\n for (int i = 0; i < repeats; i++) {\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n float acc = 0.0;\n\n for (int u = 0; u < maskw; u++) {\n for (int v = 0; v < maskh; v++) {\n int nx = x + u - ((int) (maskw / 2));\n int ny = y + v - ((int) (maskh / 2));\n\n if(nx >= nrl && nx < nrh && ny >= ncl && ny < nch) {\n acc += (out[nx][ny] / 255) * mask[u][v];\n }\n\n }\n }\n\n out2[x][y] = (acc > 0) * 255;\n }\n }\n\n byte** temp = out2;\n out2 = out;\n out = temp;\n }\n \n free_bmatrix(out2, nrl, nrh, ncl, nch);\n \n return out;\n}\n\nbyte** openingMorph(byte** f, long nrl, long nrh, long ncl, long nch, float** mask, long maskw, long maskh, int repeats) {\n byte** E = erosion(f, nrl, nrh, ncl, nch, mask, maskw, maskh, repeats);\n byte** D = dilation(E, nrl, nrh, ncl, nch, mask, maskw, maskh, repeats);\n free_bmatrix(E, nrl, nrh, ncl, nch);\n\n return D; \n}\n\nbyte** closingMorph(byte** f, long nrl, long nrh, long ncl, long nch, float** mask, long maskw, long maskh, int repeats) {\n byte** D = dilation(f, nrl, nrh, ncl, nch, mask, maskw, maskh, repeats);\n byte** E = erosion(D, nrl, nrh, ncl, nch, mask, maskw, maskh, repeats);\n free_bmatrix(D, nrl, nrh, ncl, nch);\n\n return E; \n}\n\nint reverse(int x) {\n return 255 - x;\n}\n\n// chacks for the .ppm extension\nint imageNameFormat(const struct dirent* d) {\n int len = strlen(d->d_name);\n if(len > 4 && strcmp(d->d_name + len - 4, \".ppm\") == 0) {\n return 1;\n }\n return 0;\n}\n\nvoid imageDiff(char* dirp, char* dirout) {\n printf(\"Detecting movement\\n\");\n\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n\n int dirp_len = strlen(dirp);\n int dirout_len = strlen(dirout);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n if(dirout[dirout_len-1] != '/') {\n dirout = strdup(dirout);\n dirout = 
realloc(dirout, dirout_len+2);\n dirout[dirout_len] = '/';\n dirout[dirout_len+1] = '\\0';\n dirout_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n char* outpath = malloc(sizeof(char) * (dirout_len + FILENAME_MAX));\n strcpy(curpath, dirp);\n strcpy(outpath, dirout);\n\n long nrl, nrh, ncl, nch;\n // load the first image\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n rgb8** A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch), \n **B = NULL;\n rgb8** out = rgb8matrix(nrl, nrh, ncl, nch);\n\n for(int i = 1; i < n; i++) {\n // read new image\n if(B != NULL) {\n free_rgb8matrix(B, nrl, nrh, ncl, nch);\n }\n B = A;\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch);\n\n // calc A - B\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n out[x][y].r = abs(A[x][y].r - B[x][y].r);\n out[x][y].g = abs(A[x][y].g - B[x][y].g);\n out[x][y].b = abs(A[x][y].b - B[x][y].b);\n }\n }\n\n // write out\n char* outfilename = malloc(sizeof(char) * FILENAME_MAX);\n sprintf(outfilename, \"diff%03d.ppm\", i);\n memcpy(outpath + dirout_len, outfilename, FILENAME_MAX);\n SavePPM_rgb8matrix(out, nrl, nrh, ncl, nch, outpath);\n }\n free_rgb8matrix(A, nrl, nrh, ncl, nch);\n free_rgb8matrix(B, nrl, nrh, ncl, nch);\n free_rgb8matrix(out, nrl, nrh, ncl, nch);\n}\n\nrgb8** imageAvg(char* dirp, long* nrl, long* nrh, long* ncl, long* nch) {\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n\n int dirp_len = strlen(dirp);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n strcpy(curpath, dirp);\n\n rgb8** A;\n \n // read the first image to get the size\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n\n int ***out = malloc(sizeof(int**) * (*nrh+1));\n for (int i = 0; i < *nrh; i++) {\n out[i] = malloc(sizeof(int*) * (*nch+1));\n for (int j = 0; j < *nch; j++) {\n out[i][j] = calloc(3, sizeof(int));\n }\n }\n\n for(int i = 0; i < n; i++) {\n // read new image\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n\n // add A to out\n for (int x = *nrl; x < *nrh; x++) {\n for (int y = *ncl; y < *nch; y++) {\n out[x][y][0] += A[x][y].r;\n out[x][y][1] += A[x][y].g;\n out[x][y][2] += A[x][y].b;\n }\n }\n\n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n }\n \n rgb8** rgb_out = rgb8matrix(*nrl, *nrh, *ncl, *nch);\n for (int x = *nrl; x < *nrh; x++) {\n for (int y = *ncl; y < *nch; y++) {\n rgb_out[x][y].r = (byte)(((float)out[x][y][0]) / n);\n rgb_out[x][y].g = (byte)(((float)out[x][y][1]) / n);\n rgb_out[x][y].b = (byte)(((float)out[x][y][2]) / n);\n }\n }\n for (int i = 0; i < *nrh; i++) {\n for (int j = 0; j < *nch; j++) {\n free(out[i][j]);\n }\n free(out[i]);\n }\n free(out);\n\n return rgb_out;\n}\n\nvoid imageDiffAvg(char* dirp, char* dirout, ) {\n printf(\"Detecting movement\\n\");\n\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, 
alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n int dirp_len = strlen(dirp);\n int dirout_len = strlen(dirout);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n if(dirout[dirout_len-1] != '/') {\n dirout = strdup(dirout);\n dirout = realloc(dirout, dirout_len+2);\n dirout[dirout_len] = '/';\n dirout[dirout_len+1] = '\\0';\n dirout_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n char* outpath = malloc(sizeof(char) * (dirout_len + FILENAME_MAX));\n strcpy(curpath, dirp);\n strcpy(outpath, dirout);\n\n long nrl, nrh, ncl, nch;\n // load the first image\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n rgb8** A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch), \n **Iref = imageAvg(dirp, &nrl, &nrh, &ncl, &nch); // CALCULATE AVG OF VIDEO\n\n for(int i = 0; i < n; i++) {\n // read new image\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch);\n\n // add A to out\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n A[x][y].r = abs(A[x][y].r - Iref[x][y].r);\n A[x][y].g = abs(A[x][y].g - Iref[x][y].g);\n A[x][y].b = abs(A[x][y].b - Iref[x][y].b);\n }\n }\n\n // write out\n char* outfilename = malloc(sizeof(char) * FILENAME_MAX);\n sprintf(outfilename, \"diff%03d.ppm\", i);\n memcpy(outpath + dirout_len, outfilename, FILENAME_MAX);\n SavePPM_rgb8matrix(A, nrl, nrh, ncl, nch, outpath);\n \n free_rgb8matrix(A, nrl, nrh, ncl, nch);\n }\n \n free_rgb8matrix(Iref, nrl, nrh, ncl, nch);\n}\n\nrgb8** imageMedian(char* dirp, long *nrl, long *nrh, long *ncl, long *nch) {\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n\n int dirp_len = strlen(dirp);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n strcpy(curpath, dirp);\n\n rgb8** A;\n \n // read the first image to get the size\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n\n int ***out = malloc(sizeof(int**) * (*nrh+1));\n for (int i = 0; i < *nrh; i++) {\n out[i] = malloc(sizeof(int*) * (*nch+1));\n for (int j = 0; j < *nch; j++) {\n out[i][j] = calloc(256*3, sizeof(int));\n }\n }\n\n printf(\"Calculating median image\\n\");\n for(int i = 0; i < n; i++) {\n // read new image\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n\n // add A to out\n for (int x = *nrl; x < *nrh; x++) {\n for (int y = *ncl; y < *nch; y++) {\n out[x][y][0 +A[x][y].r] ++;\n out[x][y][256+A[x][y].g] ++;\n out[x][y][512+A[x][y].b] ++;\n }\n }\n \n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n printf(\"[%d/%d] %s\\r\", i+1, n, curpath);\n }\n printf(\"\\n\");\n \n rgb8** rgb_out = rgb8matrix(*nrl, *nrh, *ncl, *nch);\n for 
(int x = *nrl; x < *nrh; x++) {\n for (int y = *ncl; y < *nch; y++) {\n int rf = 0, gf = 0, bf = 0;\n int rc = 0, gc = 0, bc = 0;\n int middle = n / 2;\n for (int i = 0; i <= 255; i++) {\n rc += out[x][y][i]; gc += out[x][y][256+i]; bc += out[x][y][512+i];\n\n if(rf == 0 && rc > middle) {\n rgb_out[x][y].r = i;\n rf = 1;\n }\n\n if(gf == 0 && gc > middle) {\n rgb_out[x][y].g = i;\n gf = 1;\n }\n\n if(bf == 0 && bc > middle) {\n rgb_out[x][y].b = i;\n bf = 1;\n }\n }\n }\n }\n\n for (int i = 0; i < *nrh; i++) {\n for (int j = 0; j < *nch; j++) {\n free(out[i][j]);\n }\n free(out[i]);\n }\n free(out);\n\n return rgb_out;\n}\n\nvoid imageDiffMedian(char* dirp, char* dirout) {\n printf(\"Detecting movement\\n\");\n\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n int dirp_len = strlen(dirp);\n int dirout_len = strlen(dirout);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n if(dirout[dirout_len-1] != '/') {\n dirout = strdup(dirout);\n dirout = realloc(dirout, dirout_len+2);\n dirout[dirout_len] = '/';\n dirout[dirout_len+1] = '\\0';\n dirout_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n char* outpath = malloc(sizeof(char) * (dirout_len + FILENAME_MAX));\n strcpy(curpath, dirp);\n strcpy(outpath, dirout);\n\n long nrl, nrh, ncl, nch;\n // load the first image\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n rgb8** A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch), \n **Iref = imageMedian(dirp, &nrl, &nrh, &ncl, &nch); // CALCULATE AVG OF VIDEO\n\n for(int i = 0; i < n; i++) {\n // read new image\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, &nrl, &nrh, &ncl, &nch);\n\n // add A to out\n for (int x = nrl; x < nrh; x++) {\n for (int y = ncl; y < nch; y++) {\n A[x][y].r = abs(A[x][y].r - Iref[x][y].r);\n A[x][y].g = abs(A[x][y].g - Iref[x][y].g);\n A[x][y].b = abs(A[x][y].b - Iref[x][y].b);\n }\n }\n\n // write out\n char* outfilename = malloc(sizeof(char) * FILENAME_MAX);\n sprintf(outfilename, \"diff%03d.ppm\", i);\n memcpy(outpath + dirout_len, outfilename, FILENAME_MAX);\n SavePPM_rgb8matrix(A, nrl, nrh, ncl, nch, outpath);\n \n free_rgb8matrix(A, nrl, nrh, ncl, nch);\n }\n \n free_rgb8matrix(Iref, nrl, nrh, ncl, nch);\n}\n\nvoid imageCeil(char* dirp, int ceilval) {\n struct dirent **namelist;\n int n = scandir(dirp, &namelist, imageNameFormat, alphasort);\n if(n < 0) {\n fprintf(stderr, \"Error scandir\\n\");\n exit(-1);\n }\n if(n < 2) {\n fprintf(stderr, \"Not enough images\\n\");\n exit(-1);\n }\n\n int dirp_len = strlen(dirp);\n int dirout_len = strlen(dirout);\n \n // format check (needs a / at the end of the directory name)\n if(dirp[dirp_len-1] != '/') {\n dirp = strdup(dirp);\n dirp = realloc(dirp, dirp_len+2);\n dirp[dirp_len] = '/';\n dirp[dirp_len+1] = '\\0';\n dirp_len++;\n }\n\n if(dirout[dirout_len-1] != '/') {\n dirout = strdup(dirout);\n dirout = realloc(dirout, dirout_len+2);\n dirout[dirout_len] = '/';\n dirout[dirout_len+1] = '\\0';\n dirout_len++;\n }\n\n char* curpath = malloc(sizeof(char) * (dirp_len + FILENAME_MAX));\n char* outpath = malloc(sizeof(char) * (dirout_len + FILENAME_MAX));\n 
strcpy(curpath, dirp);\n strcpy(outpath, dirout);\n\n rgb8** A;\n \n // read the first image to get the size\n memcpy(curpath + dirp_len, namelist[0]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n\n for(int i = 0; i < n; i++) {\n // read new image\n memcpy(curpath + dirp_len, namelist[i]->d_name, FILENAME_MAX);\n A = LoadPPM_rgb8matrix(curpath, nrl, nrh, ncl, nch);\n\n // add A to out\n for (int x = *nrl; x < *nrh; x++) {\n for (int y = *ncl; y < *nch; y++) {\n A[x][y].r = A[x][y].r > ceilval * 255;\n A[x][y].g = A[x][y].g > ceilval * 255;\n A[x][y].b = A[x][y].b > ceilval * 255;\n }\n }\n\n // write out\n char* outfilename = malloc(sizeof(char) * FILENAME_MAX);\n sprintf(outfilename, \"ceil%03d.ppm\", i);\n memcpy(outpath + dirout_len, outfilename, FILENAME_MAX);\n SavePPM_rgb8matrix(A, nrl, nrh, ncl, nch, outpath);\n \n free_rgb8matrix(A, *nrl, *nrh, *ncl, *nch);\n }\n \n return rgb_out;\n}\n\nint main(int* argc, char** argv)\n{\n /*\n long nrh, nrl,\n nch, ncl;\n byte **I;\n\n I = LoadPGM_bmatrix(\"../Images/Test/carreBruit.pgm\", &nrl, &nrh, &ncl, &nch);\n \n int r = 3, l = 3;\n float** mask = malloc(r * sizeof(float*));\n for (int i = 0; i < r; i++) {\n mask[i] = malloc(l * sizeof(float));\n for (int j = 0; j < l; j++) {\n mask[i][j] = 1;\n }\n }\n\n int repeats = 8;\n byte** closed = openingMorph(I, nrl, nrh, ncl, nch, mask, r, l, repeats);\n\n SavePGM_bmatrix(closed, nrl, nrh, ncl, nch, \"./opened.pgm\");\n */\n // SavePGM_bmatrix(Grad, nrl, nrh, ncl, nch, \"./cubesx3_gredient.pgm\");\n \n // int num_labels = 0;\n //int** labels = label(Grad, nrl, nrh, ncl, nch, &num_labels);\n //byte** l = convert_imatrix_bmatrix(labels, nrl, nrh, ncl, nch);\n //map(l, nrl, nrh, ncl, nch, reverse);\n //byte** l = getLabel(labels, nrl, nrh, ncl, nch, 300);\n //SavePGM_bmatrix(l, nrl, nrh, ncl, nch, \"./cubesx3_gredient.pgm\");\n //\n //rgb8** colLabels = colorLabels(labels, nrl, nrh, ncl, nch, num_labels);\n //SavePPM_rgb8matrix(colLabels, nrl, nrh, ncl, nch, \"./color_labels.ppm\");\n \n /* \n long nrl, nrh, ncl, nch;\n rgb8** avg = imageMedian(argv[1], &nrl, &nrh, &ncl, &nch);\n printf(\"%d %d %d %d\\n\", nrl, nrh, ncl,nch);\n SavePPM_rgb8matrix(avg, nrl, nrh, ncl, nch, \"./median.ppm\");\n */ \n\n //long nrl, nrh, ncl, nch;\n imageDiffMedian(argv[1], argv[2]);\n \n return 0;\n}\n" }, { "alpha_fraction": 0.46268656849861145, "alphanum_fraction": 0.6940298676490784, "avg_line_length": 15.75, "blob_id": "606507e3a7132655c0a675bcae744e9bd53580fe", "content_id": "4134d874f6e72313bb037ad450804564b24335e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 134, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/BDAplus/requirement.txt", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "numpy==1.18.2\npandas==1.0.3\npsycopg2-binary==2.8.4\npsycopg2==2.8.3\npython-dateutil==2.8.1\npytz==2019.3\nsix==1.14.0\nSQLAlchemy==1.3.15\n" }, { "alpha_fraction": 0.7207207083702087, "alphanum_fraction": 0.7297297120094299, "avg_line_length": 26.75, "blob_id": "73e3dec5e907cfaef1702e8c244dc4cd18574bf7", "content_id": "828a61055a58f3a63d448a5f2e667db88dc4133c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 111, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/BDAplus/README.md", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "## BDA+ 
project\nInstall the necessary python packages using: \n```pip3 install -r /path/to/requirements.txt\n```\n" }, { "alpha_fraction": 0.6865203976631165, "alphanum_fraction": 0.6865203976631165, "avg_line_length": 16.72222137451172, "blob_id": "4518c2926a8c12780d5e9868169fd21cb197dcaa", "content_id": "b78e7ee15ab6f5d95e14b82c09cedff4bf234b14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 319, "license_type": "no_license", "max_line_length": 48, "num_lines": 18, "path": "/IA/3TP/Makefile", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "UTF-8", "text": "FLAGS=-Wall -g\nLIBS=-lm\nDOC=doc\n\nall: perceptron.o\n\tgcc perceptron.o -o perceptron $(FLAGS) $(LIBS)\n\nperceptron.o: perceptron.c\n\tgcc -c perceptron.c $(FLAGS)\n\ndoc: $(DOC)/rapport.pdf\n\n$(DOC)/rapport.pdf: $(DOC)/rapport.tex\n\tcd $(DOC) && pdflatex rapport.tex # compile pdf\n\nPHONY: clean doc\nclean:\n\trm -f *.o perceptron\n" }, { "alpha_fraction": 0.5234546065330505, "alphanum_fraction": 0.5477244257926941, "avg_line_length": 25.34649658203125, "blob_id": "89a2603690e34e0d0d5ed55b2ad3410771ee30c0", "content_id": "d91cf2d40ce4a800ebbe077d504f0e40433073f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 21476, "license_type": "no_license", "max_line_length": 178, "num_lines": 785, "path": "/IA/4TP/TP4_IA_Kohonen_2018_2019_JA_NB/tp4_base_code/base_opengl.c", "repo_name": "DJAHIDDJ13/S4", "src_encoding": "ISO-8859-1", "text": "/*\r\n#########################\r\nInstallation des packages\r\nsudo apt install libglu1-mesa-dev freeglut3-dev mesa-common-dev\r\n#########################\r\nSimple programme d'affichage de points et de segments en opengl\r\nutilise GL et glut\r\n*/\r\n\r\n#include <stdlib.h>\r\n#include <stdio.h>\r\n#include <stdarg.h>\r\n#include <math.h>\r\n#include <GL/glut.h>\r\n#include <time.h>\r\n#include <string.h>\r\n#include <limits.h>\r\n\r\n#include \"base_opengl.h\"\r\n#include \"ppm.h\"\r\n#include \"kohonen.h\"\r\n\r\n#define DEFAULT_WIDTH 500\r\n#define DEFAULT_HEIGHT 500\r\n#define NB_VILLE 22\r\n#define MODE 1 // 1 FOR TRAVELING SALESMAN PROBLEM, 0 FOR IMAGE COMPRESSION\r\n\r\nint cpt = 0;\r\nint calc = 0;\r\nchar presse;\r\nPoint ville[NB_VILLE];\r\nint anglex = 0;\r\nint angley = 0;\r\nint x, y, xold, yold;\r\nGLuint textureID;\r\n\r\nint width = DEFAULT_WIDTH;\r\nint height = DEFAULT_HEIGHT;\r\n\r\nunsigned char * img = NULL;\r\n\r\n/* Global variables for the network */\r\nfloat EPSILON = 0.1;\r\nKOHONEN* map;\r\nTRAINING_DATA* DataSet;\r\n\r\n/* Reset the weights of the maps to random values between minval and maxval*/\r\nvoid resetMap(KOHONEN* m, int minval, int maxval)\r\n{\r\n for (int i = 0; i < m->sizeX * m->sizeY; i++) {\r\n for (int j = 0; j < m->sizeInput; j++) {\r\n m->weight[i][j] = ((float)rand()) / ((float)RAND_MAX) * (minval + maxval) - minval;\r\n }\r\n }\r\n}\r\n\r\n\r\n/**\r\n * Snap the neurones' weights to the closest data entry (with smallest\r\n * euclidian distance) in order to find the solution to the travelling salesman\r\n * problem (finding the result path)\r\n */\r\nvoid snapToData(KOHONEN* m, TRAINING_DATA* data)\r\n{\r\n int total_size = m->sizeX * m->sizeY;\r\n int *seen = calloc(total_size, sizeof(float));\r\n\r\n // for each city\r\n for(int i = 0; i < data->numInput; i++) {\r\n int closestArg = -1;\r\n float closest = 0;\r\n\r\n // find the neurone that is closest to that city\r\n for(int j = 0; j < total_size; j++) {\r\n float dist = 
euclidianDistance(data->input[i], m->weight[j], data->sizeInput);\r\n\r\n // while making sure that it was not taken by another city\r\n if((closestArg == -1 || dist < closest) && seen[j] == 0) {\r\n closestArg = j;\r\n closest = dist;\r\n }\r\n }\r\n\r\n // mark the neurone as taken and snap its coordinates to the city's\r\n seen[closestArg] = 1;\r\n memcpy(m->weight[closestArg], data->input[i], sizeof(float) * data->sizeInput);\r\n }\r\n\r\n // handling the excess neurones (because usually we use more neurones than\r\n // there are data entries to find the best path\r\n int last_snap = -1; // variable to store the index of the last neurone that has been snapped to a city\r\n\r\n for (int i = 0; i < total_size; i++) {\r\n // update the index of the last neurone that had been snapped to a city\r\n // and skip because we don't want to change its coordinates anymore\r\n if(last_snap == -1 || seen[i] == 1) {\r\n last_snap = i;\r\n continue;\r\n }\r\n\r\n // if the neurone is an excess (meaning it wasn't the closest to any\r\n // city), assign the coordinates of the last neurone (in order of the\r\n // network's topology ie from 0 to sizeX) that has been snapped to it.\r\n memcpy(m->weight[i], m->weight[last_snap], sizeof(float) * data->sizeInput);\r\n }\r\n\r\n // The result path can be extract from the values of last_snap\r\n\r\n free(seen);\r\n}\r\n\r\n/** Exercice 1\r\n * Initialisation aléatoire de données d'entrée\r\n */\r\nTRAINING_DATA* initialiseRandomData()\r\n{\r\n TRAINING_DATA* DataSet = initTrainingData(20, 2);\r\n\r\n for (int i = 0; i < DataSet->numInput; i++) {\r\n for (int j = 0; j < DataSet->sizeInput; j++) {\r\n DataSet->input[i][j] = ((float)rand()) / ((float)RAND_MAX) * 200;\r\n }\r\n }\r\n return DataSet;\r\n}\r\n\r\n/* Exercice 2\r\n * Initialize the training data to the cities' locations */\r\nTRAINING_DATA* initialiseCitiesData()\r\n{\r\n TRAINING_DATA* DataSet = initTrainingData(NB_VILLE, 2);\r\n\r\n for(int i = 0; i < NB_VILLE; i++) {\r\n DataSet->input[i][0] = ville[i].x;\r\n DataSet->input[i][1] = ville[i].y;\r\n }\r\n\r\n return DataSet;\r\n}\r\n\r\n/** Exercice 3\r\n * Initialize the training data to be the colors of the original image pixels\r\n */\r\nTRAINING_DATA* initialiseImageData()\r\n{\r\n // dividing the pixels of the image into blocks of size block_width x block_width \r\n // so it works for larger images with much more pixels\r\n int block_width = 1; // 1 to use all the pixels as is\r\n TRAINING_DATA* DataSet = initTrainingData(width * height / (block_width * block_width), 3);\r\n \r\n int entryNum = 0;\r\n for (int i = 0; i < width; i += block_width) {\r\n for (int j = 0; j < height; j += block_width) {\r\n int sR = 0,\r\n sG = 0,\r\n sB = 0;\r\n int total = 0;\r\n int limx = (width < i + block_width)? width: i + block_width;\r\n int limy = (height < j + block_width)? 
height: j + block_width;\r\n\r\n for (int x = i; x < limx; x++) {\r\n for (int y = j; y < limy; y++) {\r\n int pix = x * width + y;\r\n sR += img[3 * pix + 0]; \r\n sG += img[3 * pix + 1]; \r\n sB += img[3 * pix + 2];\r\n total ++;\r\n }\r\n }\r\n\r\n DataSet->input[entryNum][0] = ((float)sR) / total;\r\n DataSet->input[entryNum][1] = ((float)sG) / total;\r\n DataSet->input[entryNum][2] = ((float)sB) / total;\r\n entryNum++;\r\n }\r\n }\r\n\r\n /*\r\n for(int i = 0; i < DataSet->numInput; i++) {\r\n int j = 3 * i;\r\n for (int x = 0; x < block_width; x++) {\r\n for (int y = 0; y < block_width; y++) {\r\n DataSet->input[i][0] = img[j ] ;\r\n DataSet->input[i][1] = img[j + 1];\r\n DataSet->input[i][2] = img[i + 2];\r\n }\r\n }\r\n }\r\n */\r\n\r\n return DataSet;\r\n}\r\n\r\nfloat phi(float x)\r\n{\r\n \r\n float lambda = 0.4;\r\n float beta = 0.05;\r\n if(x < 1) {\r\n return 1;\r\n } else if(x < 2) {\r\n return lambda;\r\n } else if(x < 3) {\r\n return -beta;\r\n }\r\n\r\n return 0;\r\n}\r\n\r\n/** \r\n * Pour exercice 2\r\n */\r\nfloat phi2(float x)\r\n{\r\n float lambda = 0.4;\r\n float beta = 0.05;\r\n\r\n if(x < 1) {\r\n return 1;\r\n } else if(x < 2) {\r\n return lambda;\r\n } else if(x < 3) {\r\n return 0.2;\r\n } else if(x < 4) {\r\n return beta;\r\n }\r\n\r\n return 0;\r\n}\r\n\r\n/**\r\n * Pour exercice 3\r\n */\r\nfloat phi3(float x)\r\n{\r\n if(x < 1) {\r\n return 1;\r\n } else if(x < 2) {\r\n return 0.5;\r\n } else if(x < 3) {\r\n return 0.2;\r\n }\r\n\r\n return 0;\r\n}\r\n\r\n/* affiche la chaine fmt a partir des coordonnées x,y*/\r\nvoid draw_text(float x, float y, const char *fmt, ...)\r\n{\r\n char buf[1024]; //Holds Our String\r\n char *text = buf;\r\n va_list ap; // Pointer To List Of Arguments\r\n\r\n if (fmt == NULL) { // If There's No Text\r\n return; // Do Nothing\r\n }\r\n\r\n va_start(ap, fmt); // Parses The String For Variables\r\n vsprintf(text, fmt, ap); // And Converts Symbols To Actual Numbers\r\n va_end(ap); // Results Are Stored In Text\r\n\r\n glDisable(GL_TEXTURE_2D);\r\n glRasterPos2i( x, y );\r\n\r\n while (*text) {\r\n glutBitmapCharacter(GLUT_BITMAP_8_BY_13, *text++);\r\n }\r\n\r\n glEnable(GL_TEXTURE_2D);\r\n}\r\n\r\nGLuint charger_texture(unsigned char * data)\r\n{\r\n GLuint textureBidule;\r\n glGenTextures(1, &textureBidule); /* Texture name generation */\r\n glBindTexture(GL_TEXTURE_2D, textureBidule); /* Binding of texture name */\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear interpolation for magnification filter */\r\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear interpolation for minifying filter */\r\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);;\r\n\r\n return textureBidule;\r\n}\r\n\r\nunsigned char * transform_img_to_vector(const char * filename, int *width, int *height)\r\n{\r\n Image* image = NULL;\r\n image = readPPM(filename);\r\n\r\n if (image == NULL) {\r\n EXIT_ON_ERROR(\"error loading img\");\r\n }\r\n\r\n int i, j;\r\n unsigned char* data = NULL;\r\n *width = image->x;\r\n *height = image->y;\r\n data = (unsigned char*)malloc(3 * image->x * image->y * sizeof(unsigned char));\r\n\r\n for (i = 0; i < image->x * image->y; i++) {\r\n j = i * 3;\r\n data[j] = image->data[i].r;\r\n data[j + 1] = image->data[i].g;\r\n data[j + 2] = image->data[i].b;\r\n }\r\n\r\n if (image != NULL) {\r\n free(image->data);\r\n image->data = NULL;\r\n free(image);\r\n image = NULL ;\r\n }\r\n\r\n return data;\r\n}\r\n\r\n// 
le code original n'as pas marché sur debian\r\nvoid load_cities()\r\n{\r\n FILE * file = NULL;\r\n\r\n if ((file = fopen(\"Villes_et_positions_dans_image.txt\", \"r\")) == NULL) {\r\n EXIT_ON_ERROR(\"error while loading cities txt file\");\r\n }\r\n\r\n int x, y;\r\n char *buffer = NULL;\r\n\r\n // %ms lis et alloc jusqu'a il trouve espace\r\n for(int i = 0; (fscanf(file, \"%ms %d %d\\n\", &buffer, &x, &y)) == 3; i++) {\r\n strcpy(ville[i].name, buffer);\r\n ville[i].x = x - 5; // shift du au resize de l'image\r\n ville[i].y = y - 5;\r\n\r\n free(buffer);\r\n }\r\n\r\n fclose(file);\r\n}\r\n\r\n/* Initialize OpenGL Graphics */\r\nvoid initGL(int w, int h)\r\n{\r\n\r\n#if MODE\r\n int taille_point = 15;\r\n glViewport(0, 0, w, h); // use a screen size of WIDTH x HEIGHT\r\n#else\r\n int taille_point = 5;\r\n glViewport(0, 0, 256, 256); // use a screen size of WIDTH x HEIGHT\r\n#endif\r\n\r\n glEnable(GL_TEXTURE_2D); // Enable 2D texturing\r\n glMatrixMode(GL_PROJECTION); // Make a simple 2D projection on the entire window\r\n glLoadIdentity();\r\n\r\n#if MODE\r\n glOrtho(0.0, w, h, 0.0, -1, 1);\r\n#else\r\n glOrtho(0.0, 256, 256, 0.0, -10, 10);\r\n#endif\r\n\r\n glPointSize(taille_point);\r\n glMatrixMode(GL_MODELVIEW); // Set the matrix mode to object modeling\r\n \r\n glClearColor(0.0f, 0.0f, 0.0f, 0.0f);\r\n glClearDepth(0.0f);\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); // Clear the window\r\n}\r\n\r\n/* *************************************************** */\r\n/* main */\r\nint main(int argc, char **argv)\r\n{\r\n if (argc != 2) {\r\n EXIT_ON_ERROR(\"You must specified a .ppm file\");\r\n }\r\n\r\n img = transform_img_to_vector(argv[1], &width, &height);\r\n printf(\"Usage:\\n\"\r\n \"Press P to start learning \\n\"\r\n \"Press R to reset the neurones randomly\\n\"\r\n \"Use arrows to change the value of epsilon\\n\"\r\n \"\\tUp arrow to increase the value\\n\"\r\n \"\\tDown to decrease\\n\"\r\n \"\\tLeft and right to reset to 0.1\\n\");\r\n\r\n#if MODE\r\n load_cities();\r\n\r\n // INITIALIZING THE NETWORK\r\n map = initKohonen(50, 1, 2, phi2, loopTopologicalDistance); // takes the sizeX, sizeY, size of input vector, function callback for the neighborhood function phi\r\n resetMap(map, 50, 750); // randomize the values of the weights\r\n DataSet = initialiseCitiesData(); // load the location of the cities into the training data\r\n printf(\"Press S to snap the neurones to the data points\\n\");\r\n#else\r\n\r\n // INITIALIZING THE NETWORK\r\n int numCol = 16; // can be 32, 256 for more colors <<<<<<<<<<<<<<<<<<\r\n int networkSize = log(numCol) / log(2.0);\r\n map = initKohonen(networkSize, networkSize, 3, phi3, topologicalDistance); // takes the sizeX, sizeY, size of input vector, function callback for the neighborhood function phi\r\n resetMap(map, 0, 256); // randomize the values of the weights\r\n DataSet = initialiseImageData(); // load the location of the cities into the training data\r\n printf(\"Press S to save the compressed image\\n\");\r\n#endif\r\n\r\n /* GLUT init */\r\n glutInit(&argc, argv); // Initialize GLUT\r\n glutInitDisplayMode(GLUT_DOUBLE); // Enable double buffered mode\r\n\r\n#if MODE\r\n glutInitWindowSize(width, height); // Set the window's initial width & height\r\n#else\r\n glutInitWindowSize(512, 512); // Set the window's initial width & height\r\n#endif\r\n\r\n glutCreateWindow(\"Kohonen\"); // Create window with the name of the executable\r\n\r\n /* enregistrement des fonctions de rappel */\r\n glutDisplayFunc(affichage);\r\n 
glutKeyboardFunc(clavier);\r\n glutSpecialFunc(clavierSpecial);\r\n glutReshapeFunc(reshape);\r\n glutIdleFunc(idle);\r\n glutMouseFunc(mouse);\r\n glutMotionFunc(mousemotion);\r\n\r\n /* OpenGL 2D generic init */\r\n initGL(width, height);\r\n\r\n#if MODE\r\n textureID = charger_texture(img);\r\n#endif\r\n\r\n /* Main loop */\r\n glutMainLoop();\r\n\r\n#if MODE\r\n\r\n if (img != NULL) {\r\n free(img);\r\n img = NULL;\r\n }\r\n\r\n /* Delete used resources and quit */\r\n glDeleteTextures(1, &textureID);\r\n freeTrainingData(&DataSet);\r\n freeKohonen(&map);\r\n#endif\r\n\r\n return 0;\r\n}\r\n\r\n/* *************************************************** */\r\n\r\n/* Helper for HSLtoRGB */\r\nfloat HueToRgb(float p, float q, float t)\r\n{\r\n if (t < 0.0f) t += 1.0f;\r\n if (t > 1.0f) t -= 1.0f;\r\n if (t < 1.0f / 6.0f) return p + (q - p) * 6.0f * t;\r\n if (t < 1.0f / 2.0f) return q;\r\n if (t < 2.0f / 3.0f) return p + (q - p) * (2.0f / 3.0f - t) * 6.0f;\r\n return p;\r\n}\r\n\r\n/* Convert HSL (Hue, Saturation, Lightness) color to RGB\r\n * Used to make the sliding color effect on the neurones display */\r\nvoid HSLtoRGB(float H, float S, float L, float *R, float *G, float *B)\r\n{\r\n if (S == 0.0f) {\r\n *R = *G = *B = L;\r\n } else {\r\n float q = L < 0.5f ? L * (1.0f + S) : L + S - L * S;\r\n float p = 2.0f * L - q;\r\n *R = HueToRgb(p, q, H + 1.0f / 3.0f);\r\n *G = HueToRgb(p, q, H);\r\n *B = HueToRgb(p, q, H - 1.0f / 3.0f);\r\n }\r\n}\r\n\r\n/** Pour exercice 2\r\n * Draw the network\r\n */\r\nvoid drawKohonen2D(KOHONEN* m)\r\n{\r\n int total_size = m->sizeX * m->sizeY;\r\n\r\n // draw neurones\r\n float H = 0;\r\n float S = 1, L = 0.5; // saturation 100% and Lightness 50%\r\n float R, G, B;\r\n\r\n for (int i = 0; i < total_size; i++) {\r\n HSLtoRGB(H, S, L, &R, &G, &B);\r\n\r\n glBegin(GL_POINTS);\r\n glColor3f(R, G, B);\r\n glVertex2f(m->weight[i][0], m->weight[i][1]);\r\n glEnd();\r\n glColor3f(0, 0, 0);\r\n\r\n H = ((float)i) / (total_size - 1); // go through all hue values from 0 to 1 (to get all the colors)\r\n }\r\n\r\n glLineWidth(2);\r\n\r\n // draw synapses\r\n for (int i = 0; i < m->sizeY; i++) {\r\n for (int j = 0; j < m->sizeX; j++) {\r\n int cur_point = i * m->sizeX + j,\r\n bottom_point = i * m->sizeX + (j + 1),\r\n right_point = (i + 1) * m->sizeX + j;\r\n\r\n // Draw a line into the right neighbor\r\n if(right_point < total_size) {\r\n glBegin(GL_LINES);\r\n glColor3f(0.0, 0.0, 1.0);\r\n glVertex2f(m->weight[cur_point][0], m->weight[cur_point][1]);\r\n glVertex2f(m->weight[right_point][0], m->weight[right_point][1]);\r\n glEnd();\r\n }\r\n\r\n // Draw the line into the bottom neighbor\r\n if(bottom_point < total_size) {\r\n glBegin(GL_LINES);\r\n glColor3f(0.0, 0.0, 1.0);\r\n glVertex2f(m->weight[cur_point][0], m->weight[cur_point][1]);\r\n glVertex2f(m->weight[bottom_point][0], m->weight[bottom_point][1]);\r\n glEnd();\r\n }\r\n\r\n glColor3f(0.0, 0.0, 0.0); // reset color\r\n }\r\n }\r\n\r\n}\r\n\r\n/** Pour exercice 3\r\n * Draw the color matrix \r\n */\r\nvoid drawKohonenRGB(KOHONEN* m)\r\n{\r\n int wx = 256 / m->sizeX,\r\n wy = 256 / m->sizeY;\r\n\r\n for (int i = 0; i < m->sizeX; i++) {\r\n for (int j = 0; j < m->sizeY; j++) {\r\n int x = i * 256 / m->sizeX,\r\n y = j * 256 / m->sizeY;\r\n \r\n glPushMatrix();\r\n glTranslatef(x, y, 0.0f);\r\n \r\n glBegin(GL_QUADS);\r\n glColor3f(m->weight[i * m->sizeX + j][0]/256.0, \r\n m->weight[i * m->sizeX + j][1]/256.0, \r\n m->weight[i * m->sizeX + j][2]/256.0);\r\n glVertex2f(0 , 0 );\r\n glVertex2f(wx, 0 );\r\n 
glVertex2f(wx, wy);\r\n glVertex2f(0 , wy);\r\n glEnd();\r\n glPopMatrix();\r\n }\r\n }\r\n}\r\n\r\n/** Pour exercice 3\r\n * Ecriture de l'image compressé\r\n */\r\nvoid writeCompressed()\r\n{\r\n Image* image = malloc(sizeof(Image));\r\n image->x = width;\r\n image->y = height;\r\n\r\n image->data = malloc(sizeof(unsigned char) * 3 * width * height);\r\n \r\n for (int p = 0; p < width * height; p++) {\r\n int c = 3 * p;\r\n \r\n int minval = INT_MAX;\r\n int argmin = -1;\r\n for(int i = 0; i < map->sizeX * map->sizeY; i++) {\r\n float dist = (img[c + 0] - map->weight[i][0]) * (img[c + 0] - map->weight[i][0])\r\n + (img[c + 1] - map->weight[i][1]) * (img[c + 1] - map->weight[i][1]) \r\n + (img[c + 2] - map->weight[i][2]) * (img[c + 2] - map->weight[i][2]);\r\n if(argmin == -1 || minval > dist) {\r\n minval = dist;\r\n argmin = i;\r\n }\r\n }\r\n image->data[p].r = map->weight[argmin][0];\r\n image->data[p].g = map->weight[argmin][1];\r\n image->data[p].b = map->weight[argmin][2];\r\n }\r\n\r\n writePPM(\"compressed.ppm\", image);\r\n}\r\n\r\n/* fonction d'affichage appelée a chaque refresh*/\r\nvoid affichage()\r\n{\r\n // Clear color and depth buffers\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);\r\n glMatrixMode(GL_MODELVIEW); // Operate on model-view matrix\r\n glLoadIdentity();\r\n\r\n#if MODE\r\n int i ;\r\n /* Draw a quad */\r\n glColor3f(1.0, 1.0, 1.0);\r\n glBegin(GL_QUADS);\r\n glTexCoord2i(0, 0);\r\n glVertex2i(0, 0);\r\n glTexCoord2i(0, 1);\r\n glVertex2i(0, height);\r\n glTexCoord2i(1, 1);\r\n glVertex2i(width, height);\r\n glTexCoord2i(1, 0);\r\n glVertex2i(width, 0);\r\n glEnd();\r\n\r\n for (i = 0; i < NB_VILLE; i++) {\r\n glBegin(GL_POINTS);\r\n glColor3f(1.0, 0.0, 0.0);\r\n glVertex2f(ville[i].x, ville[i].y);\r\n glEnd();\r\n glColor3f(0, 0, 0);\r\n draw_text(ville[i].x - 20, ville[i].y + 20, \"%s\", ville[i].name);\r\n }\r\n\r\n /* Draw the network */\r\n drawKohonen2D(map);\r\n\r\n glColor3f(0.0, 0.0, 0.0);\r\n draw_text(60, 70, \"nb iter: %d\", cpt);\r\n draw_text(60, 85, \"EPSILON: %.2f\", EPSILON);\r\n#else\r\n drawKohonenRGB(map);\r\n\r\n glColor3f(0.0, 0.0, 0.0);\r\n draw_text(150, 20, \"nb iter: %d\", cpt);\r\n draw_text(150, 15, \"EPSILON: %.2f\", EPSILON);\r\n#endif\r\n\r\n\r\n glFlush();\r\n glutSwapBuffers();\r\n}\r\n\r\n// VOTRE CODE DE KOHONEN ICI\r\nvoid idle()\r\n{\r\n if (calc) { // calc est modifié si on presse \"p\" (voir la fonction \"clavier\" ci dessous)\r\n cpt++; // un simple compteur\r\n\r\n int choice = rand() % DataSet->numInput;\r\n updateKohonen(map, DataSet->input[choice], EPSILON);\r\n\r\n /*for(int i = 0; i < 20; i++) {\r\n printf(\"%g %g\\n\", map->weight[i][0], map->weight[i][1]);\r\n }*/\r\n\r\n\r\n glutPostRedisplay();\r\n }\r\n}\r\n\r\n/** \r\n * Pour traiter les fléches \r\n */\r\nvoid clavierSpecial(int touche, int x, int y)\r\n{\r\n switch(touche) {\r\n case GLUT_KEY_UP:\r\n EPSILON = fmin(EPSILON + 0.02, 2);\r\n break;\r\n\r\n case GLUT_KEY_DOWN:\r\n EPSILON = fmax(EPSILON - 0.02, 0.01);\r\n break;\r\n\r\n case GLUT_KEY_LEFT:\r\n case GLUT_KEY_RIGHT:\r\n EPSILON = 0.1;\r\n break;\r\n }\r\n\r\n glutPostRedisplay();\r\n}\r\nvoid clavier(unsigned char touche, int x, int y)\r\n{\r\n switch (touche) {\r\n case 'p':\r\n calc = !calc;\r\n break;\r\n\r\n case 's': // Snap the kohonen map to the data\r\n#if MODE\r\n snapToData(map, DataSet);\r\n printf(\"Snapping neuones to the data\\n\");\r\n#else\r\n if(calc) {\r\n printf(\"Please stop the learning to save, press p\\n\");\r\n } else {\r\n printf(\"Saving compressed image to 
./compressed.ppm\\n\");\r\n writeCompressed(); \r\n }\r\n#endif\r\n break;\r\n\r\n case 'r':\r\n#if MODE\r\n resetMap(map, 50, 750);\r\n#else\r\n resetMap(map, 0, 256);\r\n#endif\r\n printf(\"Randomizing the neurones' weights..\\n\");\r\n break;\r\n\r\n case 'q': /* la touche 'q' permet de quitter le programme */\r\n exit(0);\r\n } /* switch */\r\n\r\n glutPostRedisplay();\r\n} /* clavier */\r\n\r\n\r\nvoid reshape(GLsizei newwidth, GLsizei newheight)\r\n{\r\n // On ecrase pas width et height dans le cas image car il s'agira de la taille de l'image\r\n#if MODE\r\n width = newwidth;\r\n height = newheight;\r\n#else\r\n#endif\r\n // Set the viewport to cover the new window\r\n glViewport(0, 0, newwidth, newheight );\r\n glMatrixMode(GL_PROJECTION);\r\n glLoadIdentity();\r\n\r\n#if MODE\r\n glOrtho(0.0, width, height, 0.0, -1, 1);\r\n#else\r\n glOrtho(0.0, 256, 256, 0.0, -10, 10);\r\n#endif\r\n\r\n glMatrixMode(GL_MODELVIEW);\r\n\r\n glutPostRedisplay();\r\n}\r\n\r\n\r\n/* getion des boutons de la souris*/\r\nvoid mouse(int bouton, int etat, int x, int y)\r\n{\r\n /* si on appuie sur la bouton de gauche */\r\n if (bouton == GLUT_LEFT_BUTTON && etat == GLUT_DOWN) {\r\n presse = 1; // vrai\r\n xold = x; // sauvegarde de la position de la souris\r\n yold = y;\r\n }\r\n\r\n /* si on relache la souris */\r\n if (bouton == GLUT_LEFT_BUTTON && etat == GLUT_UP) {\r\n presse = 0; // faux\r\n }\r\n} /* mouse */\r\n\r\n\r\n\r\n/*gestion des mouvements de la souris */\r\nvoid mousemotion(int x, int y)\r\n{\r\n if (presse) { /* si le bouton gauche est presse */\r\n /* on mofifie les angles de rotation de l'objet en fonction de la position actuelle de la souris et de la derniere position sauvegard?e */\r\n anglex = anglex + (x - xold);\r\n angley = angley + (y - yold);\r\n glutPostRedisplay();\r\n }\r\n\r\n xold = x; /* sauvegarde des valeurs courante des positions de la souris */\r\n yold = y;\r\n} /* mousemotion */\r\n" } ]
31
bernice75/bookmark
https://github.com/bernice75/bookmark
d388f6e940745a3886e000fa7adf503d7d77162a
1b0adac976ea78564f20e00d3d83e7e648465e17
7432d5474b56d8a641eb614e0efd3895ecf938f2
refs/heads/master
2022-04-21T20:24:12.068410
2020-04-21T09:48:21
2020-04-21T09:48:21
257,539,110
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7280939221382141, "alphanum_fraction": 0.7289972901344299, "avg_line_length": 32.57575607299805, "blob_id": "e13b9d7d26ffed00f905ca74d209d0c96074ee76", "content_id": "21e982de9ccc7feaa414360dd5420d0dc2ceeb70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1303, "license_type": "no_license", "max_line_length": 83, "num_lines": 33, "path": "/bookmark/views.py", "repo_name": "bernice75/bookmark", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views.generic.detail import DetailView\nfrom django.urls import reverse_lazy\nfrom .models import Bookmark\n\n# Create your views here.\nclass BookmarkListView(ListView) :\n model = Bookmark\n paginate_by = 5\n\nclass BookmarkCreateView(CreateView) :\n model = Bookmark\n fields = ['site_name', 'url']\n success_url = reverse_lazy('list') # 북마크 url에서 name이 list인 것을 찾아 거기 있는 view로 이동\n template_name_suffix = '_create' # bookmark_create.html 화면을 찾아가라는 의미\n\nclass BookmarkDetailView(DetailView) :\n model = Bookmark\n\nclass BookmarkUpdateView(UpdateView) :\n model = Bookmark\n fields = ['site_name', 'url']\n template_name_suffix = '_update'\n # 여기에는 success_url이 없어 업데이트 버튼을 누른 후 어디로 갈 지 정해져 있지 않음\n # 이럴 경우 models 파일에 정의해놓은 get_absolute_url 메소드로 감\n # get_absolute_url 메소드에는 detail 화면으로 가라는 지시가 있음\n # detail 화면으로 가게 됨\n\nclass BookmarkDeleteView(DeleteView) :\n model = Bookmark\n success_url = reverse_lazy('list')" } ]
1
yangmiok/django-email
https://github.com/yangmiok/django-email
df249b47d5c6a3ed8302c88091f613802c94d6b1
2fd419755d919f55d9e3a0176df393329243fc79
2c8bf6ddfc11423ca57c9a8b2b725e72480603eb
refs/heads/master
2021-06-16T18:24:54.374694
2017-06-11T00:05:45
2017-06-11T00:05:45
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 45.5, "blob_id": "b5118764acbe4420758845aae66c23736ea6ae23", "content_id": "127381f7c6cae264f621b9985a8272d32aa3df6b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 117, "license_type": "no_license", "max_line_length": 53, "num_lines": 2, "path": "/sendemail.py", "repo_name": "yangmiok/django-email", "src_encoding": "UTF-8", "text": "from django.core.mail import send_mail\r\nsend_mail(\"这是邮件标题\", \"这是邮件主体\", '[email protected]', ['[email protected]'])" } ]
1
DistriNet/avalanche-ndss2020
https://github.com/DistriNet/avalanche-ndss2020
8dc7e62d7e2562890a89f783ef4b2017aa54e72d
1b83f32ebb07dde9c66e2a37c2de8e1ca20791bd
1f85b6a62bd5713170ae76a25b3709f0654b2e25
refs/heads/master
2022-04-09T22:16:14.252469
2020-02-24T00:27:37
2020-02-24T00:27:37
238,191,827
6
0
null
null
null
null
null
[ { "alpha_fraction": 0.5859414935112, "alphanum_fraction": 0.5910723209381104, "avg_line_length": 56.32352828979492, "blob_id": "34707d90da146885564ce4c14f0586c00b6990e8", "content_id": "0c2cf381f9f4977089b777a3bca087ddeac162d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7796, "license_type": "no_license", "max_line_length": 199, "num_lines": 136, "path": "/feature_generation/generate_ground_truth.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import csv\nimport os\nfrom subprocess import run\n\n# Execute after generating feature values. (generate_feature_values)\n\ndef prepare_files_oneset(snapshot_date, classification_types, input_folder, output_folder, with_classification=True):\n classification = {}\n\n if os.path.exists(\"{}/{}/domains_classification.csv\".format(input_folder, snapshot_date)):\n with open(\"{}/{}/domains_classification.csv\".format(input_folder, snapshot_date)) as classification_file:\n classification_csvr = csv.reader(classification_file)\n for row in classification_csvr:\n classification[row[0]] = row[1:]\n for classification_type in classification_types:\n with open(\"{}/{}/feature_values_{}.csv\".format(output_folder, snapshot_date, classification_type)) as values_file:\n csvr = csv.reader(values_file)\n header = next(csvr)\n domain_idx = header.index(\"domain\")\n with open(\"{}/{}/weka_output_{}.csv\".format(output_folder, snapshot_date, classification_type), \"w\") as weka_output:\n csvw = csv.writer(weka_output)\n csvw.writerow([\"domain\"] + header[:domain_idx] + header[domain_idx + 1:] + [\"class\"])\n for row in csvr:\n domain = row[domain_idx]\n if with_classification:\n if domain not in classification:\n print(\"Not classified:\", domain)\n continue\n domain_class_row = classification.get(domain)\n if domain_class_row[2] == \"undetermined\":\n continue\n domain_class = \"malicious\" if domain_class_row[3] == \"True\" else \"benign\"\n else:\n domain_class = None\n csvw.writerow([domain] + row[:domain_idx] + row[domain_idx + 1:] + [domain_class])\n cmd = '''head -n 1 weka_output_{}.csv > use_in_weka.csv; for f in '''.format(classification_types[0])\n cmd += \" \".join([\"weka_output_{}.csv\".format(t) for t in classification_types])\n cmd += '''; do tail -n +2 $f | sed 's/\"Limited Liability Company \"\"Registrar of domain names REG.RU\"\"\"/\"Limited Liability Company Registrar of domain names REG.RU\"/g' >> use_in_weka.csv; done;'''\n run(cmd,\n cwd=os.path.join(os.path.dirname(os.path.realpath(__file__)), output_folder, snapshot_date), shell=True )\n\ndef prepare_files_multiplesets_split_by_features_all_instances(snapshot_date, classification_types, input_folder, output_folder, with_classification=True):\n\n with open(\"{}/{}/feature_values_{}.csv\".format(output_folder, snapshot_date, classification_types[0])) as values_file:\n csvr = csv.reader(values_file)\n header = next(csvr)\n domain_idx = header.index(\"domain\")\n dnsdb_idx = header.index(\"dnsdb_available\")\n whois_idx = header.index(\"whois_available\")\n # openintel_idx = header.index(\"openintel_available\")\n\n\n output_files = {}\n for available in [\"dnsdb\", \"whois\", \"none\"]: #\"openintel\",\n output_file = open(\"{}/{}/weka_multi_output_features_all_instances_{}.csv\".format(output_folder, snapshot_date, available), \"w\")\n output_csvw = csv.writer(output_file)\n output_header = header.copy()\n idxes_to_keep = set()\n # if available == \"none\":\n 
idxes_to_keep.update({header.index(f) for f in header if not f.startswith(\"dnsdb\") and not f.startswith(\"whois\") and f != \"domain\" and f != \"suffix_type\"})\n if available == \"dnsdb\":\n idxes_to_keep.update({header.index(f) for f in header if f.startswith(\"dnsdb_\") and f.split(\"_\")[-1] not in \"CAA HINFO PTR RP SPF\".split()})\n idxes_to_keep -= {dnsdb_idx}\n elif available == \"whois\":\n idxes_to_keep.update({header.index(f) for f in header if f.startswith(\"whois_\") }) # and f != \"whois_registrar\"\n idxes_to_keep -= {whois_idx}\n # elif available == \"openintel\":\n # idxes_to_keep.update({header.index(f) for f in header if f.startswith(\"openintel_\") }) # and f != \"whois_registrar\"\n # idxes_to_keep -= {openintel_idx}\n\n output_header = [el for idx, el in enumerate(output_header) if idx in idxes_to_keep]\n output_files[available] = (output_csvw, idxes_to_keep)\n\n output_csvw.writerow([\"domain\"] + output_header +[\"class\"])\n\n classification = {}\n if os.path.exists(\"{}/{}/domains_classification.csv\".format(input_folder, snapshot_date)):\n with open(\"{}/{}/domains_classification.csv\".format(input_folder, snapshot_date)) as classification_file:\n classification_csvr = csv.reader(classification_file)\n for row in classification_csvr:\n classification[row[0]] = row[1:]\n\n for classification_type in classification_types:\n with open(\"{}/{}/feature_values_{}.csv\".format(output_folder, snapshot_date, classification_type)) as values_file:\n csvr = csv.reader(values_file)\n next(csvr)\n for row in csvr:\n domain = row[domain_idx]\n if with_classification:\n if domain not in classification:\n print(\"Not classified:\", domain)\n continue\n domain_class_row = classification.get(domain)\n if domain_class_row[2] == \"undetermined\":\n continue\n domain_class = \"malicious\" if domain_class_row[3] == \"True\" else \"benign\"\n else:\n domain_class = None\n\n dnsdb_available = row[dnsdb_idx]\n whois_available = row[whois_idx]\n # openintel_available = row[openintel_idx]\n\n # While passive DNS is considered a different data set in terms of cost/...,\n # the absence of data from passive DNS can be considered equal to having 0 queries.\n if True or dnsdb_available == \"True\":\n output_dnsdb, idxes_to_keep_dnsdb = output_files[\"dnsdb\"]\n dnsdb_row = [el for idx, el in enumerate(row) if idx in idxes_to_keep_dnsdb]\n output_dnsdb.writerow([domain] + dnsdb_row + [domain_class])\n\n if whois_available == \"True\":\n output_whois, idxes_to_keep_whois = output_files[\"whois\"]\n whois_row = [el for idx, el in enumerate(row) if idx in idxes_to_keep_whois]\n output_whois.writerow([domain] + whois_row + [domain_class])\n\n # if openintel_available == \"True\":\n # output_openintel, idxes_to_keep_openintel = output_files[\"openintel\"]\n # openintel_row = [el for idx, el in enumerate(row) if idx in idxes_to_keep_openintel]\n # output_openintel.writerow([domain] + openintel_row + [domain_class])\n\n output_none, idxes_to_keep_none = output_files[\"none\"]\n none_row = [el for idx, el in enumerate(row) if idx in idxes_to_keep_none]\n output_none.writerow([domain] + none_row + [domain_class])\n\nif __name__ == '__main__':\n input_tuples = [(\"20171129\", [\"no_action\", \"action_seize\"]),\n (\"20181129\", [\"no_action\", \"action_seize\"]),\n (\"20191129\", [\"no_action\", \"action_seize\"])]\n\n input_folder = \"input_data\"\n output_folder = \"output_data\"\n\n for snapshot_date, classification_types in input_tuples:\n\n 
prepare_files_multiplesets_split_by_features_all_instances(snapshot_date, classification_types, input_folder, output_folder, with_classification=True)\n prepare_files_oneset(snapshot_date, classification_types, input_folder, output_folder, with_classification=True)\n" }, { "alpha_fraction": 0.6698055863380432, "alphanum_fraction": 0.67648845911026, "avg_line_length": 36.40909194946289, "blob_id": "c69ba97178ad5546ef0f615e34763baa605a1634", "content_id": "391a674d897c423b034daa209f30a2b9239a74cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3292, "license_type": "no_license", "max_line_length": 116, "num_lines": 88, "path": "/evaluation_code_and_models/dataprocessing/sampleselection.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom evaluation.postanalysis import workReducedPostDomains\nfrom joblib import load\n\nimport random as rand\n\ndef random(x, y, **kwargs):\n '''randomly pick domains'''\n try:\n fraction = kwargs['fraction']\n except KeyError:\n fraction = 0.1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1-fraction, shuffle=True)\n return x_train, x_test, y_train, y_test\n\ndef practical(x, y, **kwargs):\n '''pick domains in the most practical manner, those that are most likely to have to be classified manually'''\n sourcepattern = kwargs['code']\n\n clf = load('models/2017/model' + sourcepattern + '.joblib')\n scores = clf.predict_proba(x)\n\n negative_pred_ind, no_action_pred_ind, positive_pred_ind = workReducedPostDomains('2017', sourcepattern, scores)\n positive_pred = x.loc[positive_pred_ind]\n negative_pred = x.loc[negative_pred_ind]\n x_train = x.loc[no_action_pred_ind]\n\n y_train = y[no_action_pred_ind]\n print('benign', len(y_train)-sum(y_train), 'malicious', sum(y_train))\n positive_pred_labels = y[positive_pred_ind]\n negative_pred_labels = y[negative_pred_ind]\n\n x_test = pd.concat([positive_pred, negative_pred])\n y_test = np.concatenate((positive_pred_labels, negative_pred_labels))\n\n return x_train, x_test, y_train, y_test\n\ndef createTrueFalseList(length, true_indices):\n out = []\n for i in range(length):\n if i in true_indices:\n out.append(True)\n else:\n out.append(False)\n return out\n\ndef practicalFraction(x,y, **kwargs):\n try:\n fraction = kwargs['fraction']\n except KeyError:\n fraction = 0.5\n\n sourcepattern = kwargs['code']\n\n clf = load('models/2017/model' + sourcepattern + '.joblib')\n scores = clf.predict_proba(x)\n\n negative_pred_ind, no_action_pred_ind, positive_pred_ind = workReducedPostDomains('2017', sourcepattern, scores)\n\n ind_where_true = [i for i, b in zip(range(len(no_action_pred_ind)), no_action_pred_ind) if b]\n if fraction <= 1:\n amount_of_train_domains = int(fraction*len(ind_where_true))\n else:\n amount_of_train_domains = fraction\n ind_where_true_train = rand.sample(ind_where_true, amount_of_train_domains)\n ind_where_true_test = [i for i in ind_where_true if i not in ind_where_true_train]\n no_action_pred_ind_train = createTrueFalseList(len(no_action_pred_ind), ind_where_true_train)\n no_action_pred_ind_test = createTrueFalseList(len(no_action_pred_ind), ind_where_true_test)\n\n positive_pred = x.loc[positive_pred_ind]\n negative_pred = x.loc[negative_pred_ind]\n no_action_test = x.loc[no_action_pred_ind_test]\n x_train = x.loc[no_action_pred_ind_train]\n\n y_train = y[no_action_pred_ind_train]\n 
no_action_test_labels = y[no_action_pred_ind_test]\n print('benign', len(y_train) - sum(y_train), 'malicious', sum(y_train))\n positive_pred_labels = y[positive_pred_ind]\n negative_pred_labels = y[negative_pred_ind]\n\n x_test = pd.concat([positive_pred, negative_pred, no_action_test])\n y_test = np.concatenate((positive_pred_labels, negative_pred_labels, no_action_test_labels))\n print('practical')\n\n return x_train, x_test, y_train, y_test\n" }, { "alpha_fraction": 0.5570809245109558, "alphanum_fraction": 0.5744219422340393, "avg_line_length": 40.31343460083008, "blob_id": "b6186f001041655028cc905dc82e6e846c7fec17", "content_id": "095fcb533f1c095065ddc10ff62020d435f93a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2768, "license_type": "no_license", "max_line_length": 136, "num_lines": 67, "path": "/evaluation_code_and_models/production_train.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import argparse\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom joblib import dump, load\n\nfrom dataprocessing.preprocessing import loadAndCleanDataMaxDom\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Train classifier in one year, tune its hyperparameters with 10 fold cross validation')\n\n parser.add_argument('--sources', '-s',\n type=str,\n default=\"1111\",\n help='what datasets to use in a binary pattern, reputation + lexicographic, passivedns, whois, activedns')\n\n parser.add_argument('--tuning', '-t',\n type=bool,\n default=False,\n help='Whether to tune or take hyperparameters of past')\n\n parser.add_argument('--year', '-y',\n type=str,\n default='2017',\n help='year to consider')\n\n\n\n args = parser.parse_args()\n sourcepattern = args.sources\n tune = args.tuning\n year = args.year\n\n features, labels, post_analysis_labels = loadAndCleanDataMaxDom(sourcepattern, False, year)\n print(labels.sum())\n metrics = {'f1': [], 'precision': [], 'recall': [], 'auc': [], 'acc_train': [], 'acc_test': [], 'eer': []}\n data = {'x_test': np.empty((0, features.shape[1])), 'y_test': np.empty((0,)), 'y_pred': np.empty((0,)),\n 'importance': np.zeros(len(features.columns)), 'agg_scores_train': [], 'agg_scores_test': [],\n 'labels_train': [], 'labels_test': [], 'estimators': [],\n 'y_post': np.empty((0, post_analysis_labels.shape[1])),\n 'domainname_test': []}\n\n param_grid = [\n {'loss': ['deviance', 'exponential'], 'learning_rate': [2 ** x for x in range(-5, 2, 1)], \\\n 'n_estimators': [2 ** x for x in range(4, 10)], 'min_samples_split': [2, 3, 4, 6], \\\n 'min_samples_leaf': [2, 3, 4, 6], 'max_features': ['auto', 'log2', 'sqrt'], 'max_depth': [3, 6, 12]}\n ]\n\n if tune:\n gbc = GradientBoostingClassifier()\n clf = GridSearchCV(gbc, param_grid, cv=10, scoring='f1', n_jobs=4)\n clf.fit(features, labels)\n params = clf.best_params_\n else:\n clf_tuned = load('models/2017/model' + sourcepattern + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=44)\n clf.fit(features, labels)\n\n dump(clf, 'models/' + year + '/model' + sourcepattern + '.joblib')\n" }, { "alpha_fraction": 0.6297352313995361, "alphanum_fraction": 0.6396470069885254, "avg_line_length": 49.1020393371582, 
"blob_id": "1eb0ad94cc2fd70e818334493404105f88348654", "content_id": "ff97e34c163f7ac999efbcec08eba3fcbbb10da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7365, "license_type": "no_license", "max_line_length": 123, "num_lines": 147, "path": "/evaluation_code_and_models/ensemble_evaluation.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport utils\nimport argparse\n\nfrom joblib import load\nimport itertools\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score, accuracy_score\nfrom evaluation.metrics import workReducedPostLoadThr, workReducedPostDetermineThrOneGo, workReducedPostDetermineThr\n\nimport dataprocessing.preprocessing as pre\nimport macroify\n\nimport bob.measure\n\n'''Evaluates the ensemble when training and testing on the same year. Thus, executes experiment.py for every model.'''\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Do the avalanche experiments')\n\n parser.add_argument('--year', '-y',\n type=str,\n default='2018',\n help='year to consider')\n\n args = parser.parse_args()\n year = args.year\n\n available, reputation, dns, whois, openintel, label = pre.loadAndCleanDataPerDataSet(False, year)\n\n total_fp = 0\n total_fn = 0\n total_manual = 0\n total_pred = 0\n total_amount_of_domains = len(available.index)\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_positive = labelzsss.sum()\n total_amount_negative = len(labelzsss.index) - labelzsss.sum()\n l = [False, True]\n dfs = []\n codesz = []\n ensemble_predictions = []\n ensemble_labels = []\n ensemble_scores_pos = []\n ensemble_scores_neg = []\n ensemble_predictions_priori = []\n ensemble_labels_priori = []\n metrics = {'f1': [], 'precision': [], 'recall': [], 'acc_train': [], 'acc_test': [], 'eer': [],\n 'fnr_work_reduced': [],\n 'fpr_work_reduced': [], 'work_reduced': [], 'work_reduced_negative': [], 'work_reduced_positive': []}\n\n i = 1\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n for train_index, test_index in kf.split(available.values, label):\n # Df index.\n df_train_ind, df_test_ind = available.iloc[train_index].index, available.iloc[test_index].index\n\n for x in itertools.product(l, repeat=4):\n code = ''.join(['1' if i else '0' for i in x])\n if code != '0000':\n features_maxdata, labelzz_max_data, _ = pre.loadAndCleanDataMaxDom(code, False, year)\n labelzz_max_data = pd.Series(labelzz_max_data, index=features_maxdata.index)\n\n tr_index = df_train_ind.intersection(features_maxdata.index)\n te_index = df_test_ind.intersection(features_maxdata.index)\n\n x_train, x_test = features_maxdata.loc[tr_index], features_maxdata.loc[te_index]\n y_train, y_test = labelzz_max_data.loc[tr_index], labelzz_max_data.loc[te_index]\n\n clf_tuned = load('models/'+ year +'/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=42)\n clf.fit(x_train, y_train)\n\n y_pred = clf.predict(x_test)\n scores = clf.predict_proba(x_test)\n\n features, labelzz = pre.loadAndCleanDataExactPattern(x, available, reputation, 
dns, whois, openintel,\n label)\n ind_now_in_test_set = features.index.intersection(df_test_ind)\n features = features.loc[ind_now_in_test_set]\n labelzz = labelzz.loc[ind_now_in_test_set]\n amount_of_domains = len(features.index)\n codesz.append(code)\n print(amount_of_domains, 'domains to classify for sourcepattern', code)\n if len(labelzz.index != 0):\n\n scores = clf.predict_proba(features)\n predictions = clf.predict(features)\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, _, _ = workReducedPostDetermineThrOneGo(x_train, y_train, code, scores, labelzz)\n\n total_fp += (len(positive_pred) - sum(positive_pred))\n total_fn += sum(negative_pred)\n total_manual += len(no_action_pred)\n total_pred += (len(positive_pred) + len(negative_pred))\n\n ensemble_predictions = ensemble_predictions + [1] * len(positive_pred) + [0] * len(\n negative_pred) + no_action_pred\n ensemble_labels = ensemble_labels + positive_pred + negative_pred + no_action_pred\n ensemble_predictions_priori = ensemble_predictions_priori + predictions.tolist()\n ensemble_labels_priori = ensemble_labels_priori + labelzz.values.tolist()\n ensemble_scores_neg = ensemble_scores_neg + scores[:, 1][labelzz == 0].tolist()\n ensemble_scores_pos = ensemble_scores_pos + scores[:, 1][labelzz == 1].tolist()\n\n print('Makes a prediction for', (len(positive_pred) + len(negative_pred)), 'domains')\n print('Would predict', np.sum(predictions), 'domains malicious')\n\nprint('Total work reduced', (total_amount_of_domains-total_manual)/total_amount_of_domains)\nprint('Total FNR', total_fn/total_amount_positive)\nprint('Total FPR', total_fp/total_amount_negative)\n\nprint('Accuracy', accuracy_score(ensemble_labels, ensemble_predictions))\nprint('F1', f1_score(ensemble_labels, ensemble_predictions))\nprint('Precision', precision_score(ensemble_labels, ensemble_predictions))\nprint('Recall', recall_score(ensemble_labels, ensemble_predictions))\n\nprint('Little check', total_amount_positive+total_amount_negative == total_amount_of_domains)\nprint('Little check', total_pred+total_manual == total_amount_of_domains)\n\nresults = {}\ny = utils.translateyear(year)\nz = utils.translateyear(year)\n\nresults[y+z+'workreduced'+ 'posteriori'] = (total_amount_of_domains-total_manual)/total_amount_of_domains *100\nresults[y+z+'fnr'+ 'posteriori'] = total_fn/total_amount_positive *100\nresults[y+z+'fpr'+ 'posteriori'] = total_fp/total_amount_negative *100\nresults[y+z+'accuracy'+ 'posteriori'] = accuracy_score(ensemble_labels, ensemble_predictions) *100\nresults[y+z+'fone'+ 'posteriori'] = f1_score(ensemble_labels, ensemble_predictions) *100\nresults[y+z+'precision'+ 'posteriori'] = precision_score(ensemble_labels, ensemble_predictions) *100\nresults[y+z+'recall' + 'posteriori'] = recall_score(ensemble_labels, ensemble_predictions) *100\nresults[y+z+'accuracy'] = accuracy_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\nresults[y+z+'fone'] = f1_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\nresults[y+z+'precision'] = precision_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\nresults[y+z+'recall'] = recall_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\nresults[y+z+'eer'] = bob.measure.eer(ensemble_scores_neg,ensemble_scores_pos) *100\n\n\nmacroify.append_file(results)\n" }, { "alpha_fraction": 0.7680282592773438, "alphanum_fraction": 0.7826525568962097, "avg_line_length": 115.64705657958984, "blob_id": 
"50031a60502985a67e0943f98927eafa2abde7a3", "content_id": "26b00d5557aad50b985fa0728799c7a91af1a79f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1983, "license_type": "no_license", "max_line_length": 216, "num_lines": 17, "path": "/README.md", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "# A Practical Approach for Taking Down Avalanche Botnets Under Real-World Constraints\n\n*By Victor Le Pochat, Tim Van hamme, Sourena Maroofi, Tom Van Goethem, Davy Preuveneers, Andrzej Duda, Wouter Joosen, and Maciej Korczyński*\n\nThis repository contains the source code and models of our NDSS 2020 paper [A Practical Approach for Taking Down Avalanche Botnets Under Real-World Constraints](https://lepoch.at/files/avalanche-ndss20.pdf).\n\n* `feature_generation` contains the code for parsing raw input data, extracting feature values and ground truth, and exporting them to input files for the machine learning classifier.\n* `evaluation_code_and_models` contains the code for the evaluation and the models that were trained during it. The evaluation procedure that is followed can be found in the `paper.sh` bash script, it is as follows:\n 1. train the models within 1 year by using `production_train.py`, do this for all dataset combinations and both the 2017 and 2018 iterations\n 2. evaluate the performance of every iteration and every dataset combination by using `experiment.py`, this also finds the thresholds for the work reduced metric\n 3. do the above evaluation for the full ensemble by calling `ensemble_evaluation.py`\n 4. evaluate ensemble performance when trained on one iteration and tested on another by calling `incremental_learning_evaluation.py`\n 5. evaluate the extended model trained on 2017 data + a part of 2018 data by calling `incremental_learning_evaluation.py`\n 6. the dataset impact evaluation for both the extended and base models are found in `dataset_impact_evaluation_extended.py` and `dataset_impact_evaluation.py`\n* The evaluation code depends on scikitk-learn for training the models. To obtain the equal error rate evaluation metric we rely on [bob suite](https://www.idiap.ch/software/bob/). 
Other used packages: numpy, pandas.\n\nDue to the sensitivity of the ground truth provided by law enforcement and commercial agreements for the third-party data sets, we cannot share the raw input data.\n" }, { "alpha_fraction": 0.5925883650779724, "alphanum_fraction": 0.597062885761261, "avg_line_length": 46.89560317993164, "blob_id": "1cb775acb0671be01e4ba798c2a782cc80341743", "content_id": "6847f072e845878b93848dbb89e77f819217d033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8716, "license_type": "no_license", "max_line_length": 181, "num_lines": 182, "path": "/feature_generation/generate_feature_values.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import csv\nimport datetime\nimport os\nimport re\nimport traceback\n\nfrom feature_generation.features import FeatureSet\n\ndef traverse_file(file, input_new_domain):\n orig_new_domain = input_new_domain\n new_domain = re.sub(r'[^\\w]', '', input_new_domain) if input_new_domain else input_new_domain\n return_line = None\n if file not in file_traversal_cache:\n file_traversal_cache[file] = {\"next_domain\": None, \"next_record\": None, \"finished\": False}\n orig_next_domain = file_traversal_cache[file][\"next_domain\"]\n if orig_next_domain:\n next_domain = re.sub(r'[^\\w]', '', orig_next_domain)\n else:\n next_domain = orig_next_domain\n while not file_traversal_cache[file][\"finished\"] and (not orig_next_domain or not new_domain or next_domain < new_domain\n or ((next_domain == new_domain) and (orig_next_domain != input_new_domain))) :\n try:\n next_record = next(file)\n file_traversal_cache[file][\"next_record\"] = next_record\n orig_next_domain = next_record[0].lower()\n next_domain = re.sub(r'[^\\w]', '', orig_next_domain)\n file_traversal_cache[file][\"next_domain\"] = orig_next_domain\n if not new_domain:\n new_domain = next_domain\n except StopIteration:\n file_traversal_cache[file][\"next_record\"] = None\n file_traversal_cache[file][\"finished\"] = True\n if new_domain and next_domain == new_domain and orig_new_domain == orig_next_domain:\n next_record = file_traversal_cache[file][\"next_record\"]\n return_line = next_record.copy() if next_record else None\n return return_line\n\ndef generate(formatted_snapshot_date, classification_types, input_folder, output_folder):\n snapshot_date = datetime.datetime.strptime(formatted_snapshot_date[:8], \"%Y%m%d\")\n\n feature_names = FeatureSet.get_feature_names()\n remove_sinkholed = True\n sinkholed_index = feature_names.inpredex(\"known_sinkhole\")\n if remove_sinkholed:\n del feature_names[sinkholed_index]\n\n sinkholed_removed_count = 0\n total_count = 0\n classes_counts = {c:0 for c in classification_types}\n\n abridged = False\n\n if abridged:\n dataset_check_descriptors = \"DNSDB WHOIS Renewal Validity OpenIntel\".split(\" \")\n else:\n dataset_check_descriptors = \"DNSDB WHOIS Alexa Umbrella Majestic Quantcast Suffix Renewal Validity Wordlist Wayback CT OpenIntel\".split(\n \" \")\n\n for classification_type in classification_types:\n agd_path = \"{}/{}/{}.csv\".format(input_folder, formatted_snapshot_date, classification_type)\n try:\n feature_output_file = open(\"{}/{}/feature_values_{}.csv\".format(output_folder, formatted_snapshot_date, classification_type), \"w\")\n feature_output = csv.writer(feature_output_file)\n\n # alphabetically ordered data sets\n agd_file = open(agd_path)\n agd_csvr = csv.reader(agd_file)\n # _header = next(malware_csvr) -- header stripped\n\n 
dnsdb_pdns_file = open(\"{}/{}/dnsdb_results_snapshot.csv\".format(input_folder, formatted_snapshot_date))\n dnsdb_pdns_csvr = csv.reader(dnsdb_pdns_file)\n\n whois_file = open(\"{}/{}/whois_data_snapshot.csv\".format(input_folder, formatted_snapshot_date))\n whois_csvr = csv.reader(whois_file)\n whois_header = next(whois_csvr)\n if not whois_header[0].startswith(\"domain\"):\n raise ValueError(\"Incorrect header on WHOIS file!\")\n\n all_toplists_data = {}\n for provider in \"alexa majestic quantcast umbrella\".split(\" \"):\n all_toplists_data[provider] = {}\n with open(\"{}/{}/topsites_results_{}.csv\".format(input_folder, formatted_snapshot_date, provider)) as toplists_data:\n toplists_csvr = csv.reader(toplists_data)\n for row in toplists_csvr:\n domain, occurrences, ranksum = row\n all_toplists_data[provider][domain] = (int(occurrences), float(ranksum) if ranksum else None)\n\n suffix_file = open(\"{}/{}/suffix_results.csv\".format(input_folder, formatted_snapshot_date))\n suffix_csvr = csv.reader(suffix_file)\n\n if os.path.exists(\"{}/{}/renewal_results.csv\".format(input_folder, formatted_snapshot_date)):\n renewal_file = open(\"{}/{}/renewal_results.csv\".format(input_folder, formatted_snapshot_date))\n renewal_csvr = csv.reader(renewal_file)\n else:\n renewal_file = None\n renewal_csvr = None\n\n if os.path.exists(\"{}/{}/whois_validity_data.csv\".format(input_folder, formatted_snapshot_date)):\n whois_validity_file = open(\"{}/{}/whois_validity_data.csv\".format(input_folder, formatted_snapshot_date))\n whois_validity_csvr = csv.reader(whois_validity_file)\n else:\n whois_validity_file = None\n whois_validity_csvr = None\n\n wordlist_based_file = open(\"{}/{}/wordlist_based_results.csv\".format(input_folder, formatted_snapshot_date))\n wordlist_based_csvr = csv.reader(wordlist_based_file)\n\n feature_output.writerow(feature_names)\n\n dataset_check_results = []\n\n for domain_data in agd_csvr:\n domain = domain_data[0]\n\n whois_line = traverse_file(whois_csvr, domain)\n if whois_line:\n if domain != whois_line[0]:\n print(domain, whois_line)\n whois_data = dict(zip(whois_header, whois_line)) if whois_line else None\n dnsdb_pdns_data = traverse_file(dnsdb_pdns_csvr, domain)\n toplists_data = {}\n for provider in \"alexa majestic quantcast umbrella\".split(\" \"):\n toplists_data[provider] = all_toplists_data[provider].get(domain, None)\n suffix_data = traverse_file(suffix_csvr, domain)\n renewal_data = traverse_file(renewal_csvr, domain) if renewal_csvr else None\n whois_validity_data = traverse_file(whois_validity_csvr, domain) if whois_validity_csvr else None\n wordlist_based_data = traverse_file(wordlist_based_csvr, domain)\n # openintel_adns_data = traverse_file(openintel_adns_csvr, domain)\n\n fs = FeatureSet(domain, snapshot_date, domain_data, dnsdb_pdns_data, whois_data, toplists_data, suffix_data, renewal_data, whois_validity_data, wordlist_based_data)\n\n try:\n fs.generate_feature()\n exported_feature = fs.export()\n if remove_sinkholed:\n if exported_feature[sinkholed_index] != None:\n sinkholed_removed_count += 1\n continue\n else:\n total_count += 1\n classes_counts[classification_type] += 1\n del exported_feature[sinkholed_index]\n feature_output.writerow(exported_feature)\n\n datasets_available = fs.check_datasets(abridged=abridged)\n dataset_check_results.append(datasets_available)\n\n except: # feature generation failed\n traceback.print_exc()\n continue\n finally:\n agd_file.close()\n dnsdb_pdns_file.close()\n whois_file.close()\n suffix_file.close()\n if 
renewal_file: renewal_file.close()\n if whois_validity_file: whois_validity_file.close()\n wordlist_based_file.close()\n\n print(classification_type, snapshot_date, \"(stats after sinkholing)\")\n for idx, results_row in enumerate(zip(*dataset_check_results)):\n print(dataset_check_descriptors[idx].ljust(15), str(len([r for r in results_row if r is not False])).rjust(6), str(len([r for r in results_row if r is False])).rjust(6))\n\n print(\"Sinkholed domains\", sinkholed_removed_count)\n print(\"Retained domains\", total_count)\n print(\"Counts per class\", classes_counts)\n\n\nif __name__ == '__main__':\n input_tuples = [\n (\"20171129\", [\"no_action\", \"action_seize\"]),\n (\"20181129\", [\"no_action\", \"action_seize\"]),\n (\"20191129\", [\"no_action\", \"action_seize\"])\n ]\n\n input_folder = \"input_data\"\n output_folder = \"output_data\"\n\n for snapshot_date, classification_types in input_tuples:\n file_traversal_cache = {}\n\n generate(snapshot_date, classification_types, input_folder, output_folder)" }, { "alpha_fraction": 0.5863685011863708, "alphanum_fraction": 0.6191693544387817, "avg_line_length": 39.13675308227539, "blob_id": "2856af8e08da03d29a5087fb48e39568fa1e5c35", "content_id": "7252e46aed59c3f11569e440d4907509ed64f476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4695, "license_type": "no_license", "max_line_length": 96, "num_lines": 117, "path": "/evaluation_code_and_models/evaluation/preanalysis.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import numpy as np\n\nimport seaborn as sns\nfrom matplotlib import pyplot as plt\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.covariance import empirical_covariance\nfrom scipy.ndimage import gaussian_filter\n\ndef pcaAnalysis(features, labels, path):\n '''\n Do a PCA.\n :param features: the features of the data points\n :param labels: labels for data points\n :param path: paht to save to\n '''\n #### PCA\n pca = PCA()\n scl = StandardScaler()\n standardized = scl.fit_transform(features,labels)\n reduced = pca.fit_transform(standardized, labels)\n sum = np.zeros(pca.components_.shape[0])\n for row, sv in zip(pca.components_, pca.singular_values_):\n sum = sum + np.abs(row * sv)\n for i, v in enumerate(sorted(range(len(sum)), key=lambda k: sum[k])):\n print(str(i), '. 
', features.columns[v])\n sns.heatmap(pca.inverse_transform(np.eye(features.shape[1])), cmap='hot', cbar=False)\n plt.xlabel('feature index')\n plt.ylabel('principal component')\n plt.savefig('figures/' + path + 'pcaheatmap.pdf')\n plt.close()\n\ndef covMatrix(features, labels, path):\n '''\n Calculate feature correlations\n :param features: feature values of the data points\n :param labels: labels of the data points\n :param path: path to save to\n '''\n scl = StandardScaler()\n standardized = scl.fit_transform(features, labels)\n corr = empirical_covariance(standardized)\n # mask = np.zeros_like(corr, dtype=np.bool)\n # mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(11, 9))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(corr, cmap=cmap, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig('figures/' + path + 'covmatrix.pdf')\n plt.close()\n\n filtered = gaussian_filter(np.abs(corr), sigma=2)\n # mask = np.zeros_like(corr, dtype=np.bool)\n # mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(11, 9))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(filtered, cmap=cmap, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig('figures/' + path + 'filtered.pdf')\n plt.close()\n\n filtered = gaussian_filter(np.clip(corr, a_min=-1, a_max=0), sigma=2)\n # mask = np.zeros_like(corr, dtype=np.bool)\n # mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(11, 9))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(filtered, cmap=cmap, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig('figures/' + path + 'negativecorrelated.pdf')\n plt.close()\n\n filtered = gaussian_filter(np.clip(corr, a_min=0, a_max=1), sigma=2)\n # mask = np.zeros_like(corr, dtype=np.bool)\n # mask[np.triu_indices_from(mask)] = True\n f, ax = plt.subplots(figsize=(11, 9))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(filtered, cmap=cmap, center=0,\n square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n plt.savefig('figures/' + path + 'positivecorrelatedcov.pdf')\n plt.close()\n\n abs_corr = np.abs(corr)\n mat = abs_corr[0:10,0:10]\n jj = np.sum(mat)/(mat.shape[0]*mat.shape[1])\n mat = abs_corr[0:10,10:21]\n jp = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n mat = abs_corr[0:10, 21:36]\n jw = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n mat = abs_corr[0:10, 36:]\n ja = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n\n mat = abs_corr[10:21, 10:21]\n pp = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n mat = abs_corr[10:21, 21:36]\n pw = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n mat = abs_corr[10:21, 36:]\n pa = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n\n mat = abs_corr[21:36, 21:36]\n ww = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n mat = abs_corr[21:36, 36:]\n wa = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n\n mat = abs_corr[36:, 36:]\n aa = np.sum(mat) / (mat.shape[0] * mat.shape[1])\n\n average = np.array([[jj, jp, jw, ja], [jp, pp, pw, pa], [jw, pw, ww, wa], [ja, pa, wa, aa]])\n f, ax = plt.subplots(figsize=(4,1.8))\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n sns.heatmap(average, cmap=cmap, center=0, vmax=0.2, annot=True,\n square=False, linewidths=.5, cbar_kws={\"shrink\": 1},\n xticklabels=[\"Joint\", \"Passive\\nDNS\", \"WHOIS\", \"Active\\nDNS\"],\n yticklabels=[\"Joint\", \"Passive DNS\", \"WHOIS\", \"Active DNS\"])\n plt.tight_layout()\n plt.savefig('figures/' + path + 
'averageperdatasetcov.pdf',bbox_inches='tight', dpi=600)\n plt.close()" }, { "alpha_fraction": 0.5859124660491943, "alphanum_fraction": 0.6106077432632446, "avg_line_length": 45.97551727294922, "blob_id": "f991fe1c7965d26ec99c278041d5ef23d3bb15e7", "content_id": "5576fb049c673ec70af2e3785232f5aabe046603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24944, "license_type": "no_license", "max_line_length": 161, "num_lines": 531, "path": "/evaluation_code_and_models/evaluation/metrics.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nimport matplotlib.ticker\nimport matplotlib.patches as patches\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom joblib import load\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# import bob.measure\n\ndef workreduced(scores, labels, costs, plot, savemetrics, path):\n df = pd.DataFrame({'scores': scores, 'labels':labels, 'inverse_labels':np.bitwise_xor(labels.astype(int),np.ones(len(labels),dtype=int))})\n #FNR\n sorted_asc = df.sort_values('scores', ascending=True)\n cumsum = np.cumsum(sorted_asc['labels'])\n total_malicious = cumsum.iloc[-1]\n sorted_asc['cumsum'] = cumsum\n sorted_asc.index = range(0,len(sorted_asc.index))\n sorted_asc['malrate'] = sorted_asc['cumsum']/sorted_asc.index\n metricsfnr = []\n thresholdsfnr = []\n for c in costs:\n filtered = sorted_asc[sorted_asc['cumsum']/total_malicious < c]\n if len(filtered.index) != 0:\n # print(filtered.iloc[-1,2])\n ind = filtered.index[-1]\n metricsfnr.append(ind/len(sorted_asc.index))\n thresholdsfnr.append(filtered.loc[:,'scores'].iloc[-1])\n else:\n metricsfnr.append(0)\n thresholdsfnr.append(0)\n print('For cost', c, 'df fnr is empty')\n\n\n\n #FPR\n sorted_desc = df.sort_values('scores', ascending=False)\n cumsum = np.cumsum(sorted_desc['inverse_labels'])\n total_benign = cumsum.iloc[-1]\n sorted_desc['cumsum'] = cumsum\n sorted_desc.index = range(0,len(sorted_desc.index))\n sorted_desc['benignrate'] = sorted_desc['cumsum']\n metricsfpr = []\n thresholdsfpr = []\n\n for c in costs:\n filtered = sorted_desc[sorted_desc['cumsum'] / total_benign < c]\n if len(filtered.index) != 0:\n ind = filtered.index[-1]\n metricsfpr.append(ind / len(sorted_desc.index))\n thresholdsfpr.append(filtered.loc[:,'scores'].iloc[-1])\n else:\n metricsfpr.append(0)\n thresholdsfpr.append(1)\n print('For cost', c, 'df fpr is empty')\n\n if plot:\n plotBins(costs, sorted_desc, total_benign, sorted_asc, total_malicious, metricsfnr, metricsfpr, path, scores, labels)\n if savemetrics:\n saveBinMetrics(metricsfnr, metricsfpr, costs, thresholdsfnr, thresholdsfpr, path)\n\n return metricsfnr, metricsfpr\n\ndef workreducedThrBis(scores, labels, costs, plot, savemetrics, path):\n df = pd.DataFrame({'scores': scores, 'labels':labels, 'inverse_labels':np.bitwise_xor(labels.astype(int),np.ones(len(labels),dtype=int))})\n #FNR\n sorted_asc = df.sort_values('scores', ascending=True)\n cumsum = np.cumsum(sorted_asc['labels'])\n total_malicious = cumsum.iloc[-1]\n sorted_asc['cumsum'] = cumsum\n sorted_asc.index = range(0,len(sorted_asc.index))\n sorted_asc['malrate'] = sorted_asc['cumsum']/sorted_asc.index\n metricsfnr = []\n thresholdsfnr = []\n for c in costs:\n filtered = sorted_asc[sorted_asc['cumsum']/total_malicious < c]\n if len(filtered.index) != 0:\n # 
print(filtered.iloc[-1,2])\n ind = filtered.index[-1]\n metricsfnr.append(ind/len(sorted_asc.index))\n thresholdsfnr.append(filtered.loc[:,'scores'].iloc[-1])\n else:\n metricsfnr.append(0)\n thresholdsfnr.append(0)\n print('For cost', c, 'df fnr is empty')\n\n\n\n #FPR\n sorted_desc = df.sort_values('scores', ascending=False)\n cumsum = np.cumsum(sorted_desc['inverse_labels'])\n total_benign = cumsum.iloc[-1]\n sorted_desc['cumsum'] = cumsum\n sorted_desc.index = range(0,len(sorted_desc.index))\n sorted_desc['benignrate'] = sorted_desc['cumsum']\n metricsfpr = []\n thresholdsfpr = []\n\n for c in costs:\n filtered = sorted_desc[sorted_desc['cumsum'] / total_benign < c]\n if len(filtered.index) != 0:\n ind = filtered.index[-1]\n metricsfpr.append(ind / len(sorted_desc.index))\n thresholdsfpr.append(filtered.loc[:,'scores'].iloc[-1])\n else:\n metricsfpr.append(0)\n thresholdsfpr.append(1)\n print('For cost', c, 'df fpr is empty')\n\n if plot:\n plotBins(costs, sorted_desc, total_benign, sorted_asc, total_malicious, metricsfnr, metricsfpr, path, scores, labels)\n if savemetrics:\n saveBinMetrics(metricsfnr, metricsfpr, costs, thresholdsfnr, thresholdsfpr, path)\n\n return metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr\n\ndef workreducedThr(scores, labels, c):\n df = pd.DataFrame({'scores': scores, 'labels':labels, 'inverse_labels':np.bitwise_xor(labels.astype(int),np.ones(len(labels),dtype=int))})\n #FNR\n sorted_asc = df.sort_values('scores', ascending=True)\n cumsum = np.cumsum(sorted_asc['labels'])\n total_malicious = cumsum.iloc[-1]\n sorted_asc['cumsum'] = cumsum\n sorted_asc.index = range(0,len(sorted_asc.index))\n sorted_asc['malrate'] = sorted_asc['cumsum']/sorted_asc.index\n\n filtered = sorted_asc[sorted_asc['cumsum']/total_malicious < c]\n if len(filtered.index) != 0:\n # print(filtered.iloc[-1,2])\n ind = filtered.index[-1]\n metricsfnr = ind/len(sorted_asc.index)\n thresholdsfnr = filtered.loc[:,'scores'].iloc[-1]\n else:\n metricsfnr = 0\n thresholdsfnr = 0\n print('For cost', c, 'df fnr is empty')\n\n\n\n #FPR\n sorted_desc = df.sort_values('scores', ascending=False)\n cumsum = np.cumsum(sorted_desc['inverse_labels'])\n total_benign = cumsum.iloc[-1]\n sorted_desc['cumsum'] = cumsum\n sorted_desc.index = range(0,len(sorted_desc.index))\n sorted_desc['benignrate'] = sorted_desc['cumsum']\n filtered = sorted_desc[sorted_desc['cumsum'] / total_benign < c]\n if len(filtered.index) != 0:\n ind = filtered.index[-1]\n metricsfpr = ind / len(sorted_desc.index)\n thresholdsfpr = filtered.loc[:,'scores'].iloc[-1]\n else:\n metricsfpr = 0\n thresholdsfpr = 1\n print('For cost', c, 'df fpr is empty')\n\n return metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr\n\n\ndef plotBins(costs, sorted_desc, total_benign, sorted_asc, total_malicious, metricsfnr, metricsfpr, path, scores, labels):\n figsize = (6.4, 3.2)\n f = plt.figure(figsize=figsize)\n\n plt.semilogy(sorted_asc.index/len(sorted_asc.index)*100, sorted_asc['cumsum']/total_malicious *100, label='False negative rate')\n\n\n plt.semilogy((sorted_desc.index/len(sorted_desc.index)*100), (sorted_desc['cumsum']/total_benign *100)[::-1], label='False positive rate')\n plt.legend()\n plt.gca().yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n\n # find intersection of two curves\n isec = ((sorted_asc['cumsum']/total_malicious *100) - (sorted_desc['cumsum']/total_benign *100)[::-1]).abs()\n # plt.semilogy((isec.index/len(isec.index)*100), 100-isec)\n idxmin = ((100-isec).argsort()[0:2])\n 
print(isec[idxmin[0]-2:idxmin[0]+2])\n eer = ((sorted_asc['cumsum']/total_malicious *100).iloc[idxmin]).mean()\n print(\"eer\", eer)\n scores_neg = scores[labels == 0].tolist()\n scores_pos = scores[labels == 1].tolist()\n # eer = bob.measure.eer(scores_neg, scores_pos)*100\n\n plt.gca().yaxis.set_ticks([0.1,1,10,100,0.5,2])\n plt.gca().yaxis.set_ticklabels([\"0.1\"]+[\"1.0\"]+[\"10.0\"]+[\"100.0\"]+[\"0.5\"]+[\"2.0\"])\n plt.ylim((0.05,100))\n plt.xlim((0,100))\n plt.xlabel('Fraction of domains (%)')\n plt.ylabel('Error rate (%)')\n\n axis_to_data = plt.gca().transAxes + plt.gca().transData.inverted()\n data_to_axis = axis_to_data.inverted()\n\n half_spacing_between_arrows = 0.00\n\n for c, max_fnr, max_fpr in zip([costs[-1]],[metricsfnr[-1]], [metricsfpr[-1]]):\n # points_data = axis_to_data.transform([(0, c*100), (max_fnr*100, c*100)])\n print(data_to_axis.transform([(0, c*100), (max_fnr*100, c*100)]))\n # plt.hlines(c*100, 0, max*100, linestyles='dashed', colors=\"black\" if c == 0.02 else \"grey\")\n plt.annotate('', xytext=data_to_axis.transform([(0, c*100)])[0] + [0, half_spacing_between_arrows], textcoords='axes fraction',\n xy=data_to_axis.transform([(max_fnr*100, c*100)])[0] + [0, half_spacing_between_arrows], xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C2\" if c == 0.02 else \"lightgrey\", \"linestyle\": \"--\", \"linewidth\":1, \"shrinkA\": 0, \"shrinkB\": 0})\n plt.annotate('', xytext=data_to_axis.transform([(100, c*100)])[0] - [0, half_spacing_between_arrows], textcoords='axes fraction',\n xy=data_to_axis.transform([(100 - max_fpr*100, c*100)])[0] - [0, half_spacing_between_arrows], xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C3\" if c == 0.02 else \"lightgrey\", \"linestyle\": \"--\", \"linewidth\":1, \"shrinkA\": 0, \"shrinkB\": 0})\n if c == 0.02:\n plt.annotate('', xytext=data_to_axis.transform([(max_fnr * 100, c * 100)])[0],\n textcoords='axes fraction',\n xy=[data_to_axis.transform([(max_fnr * 100, 1)])[0][0], 0],\n xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C2\" , \"linestyle\": \"--\",\n \"linewidth\": 1, \"shrinkA\": 0, \"shrinkB\": 0})\n plt.annotate('', xytext=data_to_axis.transform([(100 - max_fpr * 100, c * 100)])[0],\n textcoords='axes fraction',\n xy=[data_to_axis.transform([(100 - max_fpr * 100, 1)])[0][0], 0],\n xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C3\" , \"linestyle\": \"--\",\n \"linewidth\": 1, \"shrinkA\": 0, \"shrinkB\": 0})\n ticks_list = list(plt.xticks()[0])\n ticks_list.remove(60)\n plt.xticks(ticks_list + [max_fnr * 100, 100 - max_fpr * 100])\n\n p = patches.Rectangle((0,eer), 100, 100-eer, linewidth=0, fill=None, hatch='///', color='lightgrey') # data_to_axis.transform([(5.1 * 100, 0)])[0]\n plt.gca().add_patch(p)\n\n bbox_props = dict(boxstyle=\"rarrow\", fc=\"white\", ec=\"C0\", lw=1)\n plt.text(50, eer, \"Equal error rate\", ha=\"center\", va=\"center\", rotation=0,\n size=10,\n bbox=bbox_props)\n\n plt.text(50, 25, \"Above equal error rate: use 100% of automated classification\", size=10, rotation=0,\n ha=\"center\", va=\"center\",\n bbox=dict(boxstyle=\"round\",\n ec=\"white\",\n facecolor=\"white\",\n )\n )\n f.savefig('figures/' + path + 'bins.pdf',bbox_inches='tight', dpi=600)\n plt.close()\n\ndef saveBinMetrics(metricsfnr, metricsfpr, costs, thresholdsfnr, thresholdsfpr, path):\n metricsfnr = [mfnr * 100 for mfnr in metricsfnr]\n metricsfpr = [mfpr * 100 for mfpr in metricsfpr]\n costs = [cost * 100 for cost in costs]\n 
sum = [x + y for x, y in zip(metricsfnr, metricsfpr)]\n df = pd.DataFrame({'fnr': metricsfnr, 'fpr': metricsfpr, 'thresholds_fnr': thresholdsfnr,\n 'thresholds_fpr': thresholdsfpr, 'sum': sum}, index=costs)\n df.to_csv('dfs/' + path + 'workreduced.csv')\n\n\ndef workReducedPost(lower, upper, scores, y_true):\n # scores lower than thresh, higher than threshold, in the middle. Calculate fraction and calculate metrics -> labels and predictions\n negative_pred = [l for s, l in zip(scores[:, 1], y_true) if s < lower]\n no_action_pred = [l for s, l in zip(scores[:, 1], y_true) if s >= lower and s <= upper]\n positive_pred = [l for s, l in zip(scores[:, 1], y_true) if s > upper]\n\n total_malicious = y_true.sum()\n total_benign = len(y_true) - total_malicious\n\n fnr = sum(negative_pred) / total_malicious\n fpr = (len(positive_pred) - sum(positive_pred)) / total_benign\n\n work_reduced_negative = len(negative_pred) / len(y_true)\n work_reduced_positive = len(positive_pred) / len(y_true)\n work_reduced = work_reduced_negative + work_reduced_positive\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, work_reduced_positive\n\n\ndef workReducedPostLoadThr(trainyear, code, scores, y_true):\n thresholds = pd.read_csv('dfs/' + trainyear + '/' + code + '_workreduced.csv', index_col=0).loc[:,\n ['thresholds_fnr', 'thresholds_fpr']]\n upper = thresholds.iloc[3, 1]\n lower = thresholds.iloc[3, 0]\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scores, y_true)\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, lower, upper\n\ndef workReducedPostDetermineThr(features, labels, code, scoreszz, y_true):\n fnr = []\n fpr = []\n thr_fnr = []\n thr_fpr = []\n\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n for train_index, test_index in kf.split(features.values, labels):\n # Split the training and testing data\n X_train, X_test = features.values[train_index], features.values[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n # Load parameters of the hyperparameter tuned model.\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=44)\n clf.fit(X_train, y_train)\n\n scores = clf.predict_proba(X_test)\n\n metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr = workreducedThr(scores[:,1], y_test, 0.02)\n fnr.append(metricsfnr)\n fpr.append(metricsfpr)\n thr_fnr.append(thresholdsfnr)\n thr_fpr.append(thresholdsfpr)\n\n fnr = np.array(fnr)\n fpr = np.array(fpr)\n thr_fnr = np.array(thr_fnr)\n thr_fpr = np.array(thr_fpr)\n print('FNR work reduced', fnr.mean(), '+/-', fnr.std())\n print('FPR work reduced', fpr.mean(), '+/-', fpr.std())\n print('Total work reduced', fnr.mean() + fpr.mean())\n print('Lower thr', thr_fnr.mean(), '+/-', thr_fnr.std())\n print('Upper thr', fpr.mean(), '+/-', fpr.std())\n print()\n\n lower, upper = thr_fnr.mean(), thr_fpr.mean()\n # lower, upper = thr_fnr.mean() - thr_fnr.std(), thr_fpr.mean() + thr_fpr.std()\n\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scoreszz, 
y_true)\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, lower, upper\n\n\ndef workReducedPostDetermineThrOneGo(features, labels, code, scoreszz, y_true):\n scores = []\n labelsz = []\n\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n for train_index, test_index in kf.split(features.values, labels):\n # Split the training and testing data\n X_train, X_test = features.values[train_index], features.values[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n # Load parameters of the hyperparameter tuned model.\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=44)\n clf.fit(X_train, y_train)\n\n s = clf.predict_proba(X_test)\n scores = np.append(scores, s[:,1])\n labelsz = np.append(labelsz, y_test)\n\n metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr = workreducedThr(scores, labelsz, 0.02)\n\n lower, upper = thresholdsfnr, thresholdsfpr\n # lower, upper = thr_fnr.mean() - thr_fnr.std(), thr_fpr.mean() + thr_fpr.std()\n\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scoreszz, y_true)\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, lower, upper\n\ndef workReducedPostDetermineThrOneGoBis(features, labels, code, scoreszz, y_true, stratify_classes, costs, plot=False,\n savemetrics=False, path=''):\n scores = []\n labelsz = []\n\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n for train_index, test_index in kf.split(features.values, stratify_classes):\n # Split the training and testing data\n X_train, X_test = features.values[train_index], features.values[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n\n # Load parameters of the hyperparameter tuned model.\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=44)\n clf.fit(X_train, y_train)\n\n s = clf.predict_proba(X_test)\n scores = np.append(scores, s[:,1])\n labelsz = np.append(labelsz, y_test)\n\n metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr = workreducedThrBis(scores, labelsz, costs,\n plot=plot, savemetrics=savemetrics, path=path)\n\n lower, upper = thresholdsfnr[-1], thresholdsfpr[-1]\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scoreszz, y_true)\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, thresholdsfnr, thresholdsfpr\n\n\ndef workReducedPostDetermineThrOneGoOneYear(features, labels, additional_features, addtional_labels, code, scoreszz, y_true):\n '''Only look for thresholds on the additional dataset'''\n\n scores = []\n labelsz = []\n\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n for train_index, test_index in kf.split(additional_features.values, addtional_labels):\n # Split 
the training and testing data\n X_train_add, X_test = additional_features.values[train_index], additional_features.values[test_index]\n y_train_add, y_test = addtional_labels[train_index], addtional_labels[test_index]\n\n X_train = np.concatenate((features.values, X_train_add))\n y_train = np.concatenate((labels, y_train_add))\n\n # Load parameters of the hyperparameter tuned model.\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=44)\n clf.fit(X_train, y_train)\n\n s = clf.predict_proba(X_test)\n scores = np.append(scores, s[:,1])\n labelsz = np.append(labelsz, y_test)\n\n metricsfnr, metricsfpr, thresholdsfnr, thresholdsfpr = workreducedThr(scores, labelsz, 0.02)\n\n # print('Total work reduced', metricsfnr, metricsfpr, metricsfnr + metricsfpr)\n # print('Lower thr', thresholdsfnr)\n # print('Upper thr', thresholdsfpr)\n # print()\n\n lower, upper = thresholdsfnr, thresholdsfpr\n # lower, upper = thr_fnr.mean() - thr_fnr.std(), thr_fpr.mean() + thr_fpr.std()\n\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scoreszz, y_true)\n\n return positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, lower, upper\n\n\ndef plotBinsGreyScale(costs, sorted_desc, total_benign, sorted_asc, total_malicious, metricsfnr, metricsfpr, path, scores, labels):\n figsize = (6.4, 3.2)\n f = plt.figure(figsize=figsize)\n\n plt.semilogy(sorted_asc.index/len(sorted_asc.index)*100, sorted_asc['cumsum']/total_malicious *100, label='False negative rate')\n\n\n plt.semilogy((sorted_desc.index/len(sorted_desc.index)*100), (sorted_desc['cumsum']/total_benign *100)[::-1], label='False positive rate')\n plt.legend()\n plt.gca().yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())\n\n # find intersection of two curves\n isec = ((sorted_asc['cumsum']/total_malicious *100) - (sorted_desc['cumsum']/total_benign *100)[::-1]).abs()\n # plt.semilogy((isec.index/len(isec.index)*100), 100-isec)\n idxmin = ((100-isec).argsort()[0:2])\n print(isec[idxmin[0]-2:idxmin[0]+2])\n eer = ((sorted_asc['cumsum']/total_malicious *100).iloc[idxmin]).mean()\n print(\"eer\", eer)\n scores_neg = scores[labels == 0].tolist()\n scores_pos = scores[labels == 1].tolist()\n # eer = bob.measure.eer(scores_neg, scores_pos)*100\n\n plt.gca().yaxis.set_ticks([0.1,1,10,100,0.5,2])\n plt.gca().yaxis.set_ticklabels([\"0.1\"]+[\"1.0\"]+[\"10.0\"]+[\"100.0\"]+[\"0.5\"]+[\"2.0\"])\n plt.ylim((0.05,100))\n plt.xlim((0,100))\n plt.xlabel('Fraction of domains (%)')\n plt.ylabel('Error rate (%)')\n\n axis_to_data = plt.gca().transAxes + plt.gca().transData.inverted()\n data_to_axis = axis_to_data.inverted()\n\n half_spacing_between_arrows = 0.00\n\n for c, max_fnr, max_fpr in zip([costs[-1]],[metricsfnr[-1]], [metricsfpr[-1]]):\n print(data_to_axis.transform([(0, c*100), (max_fnr*100, c*100)]))\n plt.annotate('', xytext=data_to_axis.transform([(0, c*100)])[0] + [0, half_spacing_between_arrows], textcoords='axes fraction',\n xy=data_to_axis.transform([(max_fnr*100, c*100)])[0] + [0, half_spacing_between_arrows], xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C2\" if c == 0.02 else \"lightgrey\", \"linestyle\": \"--\", 
\"linewidth\":1, \"shrinkA\": 0, \"shrinkB\": 0})\n plt.annotate('', xytext=data_to_axis.transform([(100, c*100)])[0] - [0, half_spacing_between_arrows], textcoords='axes fraction',\n xy=data_to_axis.transform([(100 - max_fpr*100, c*100)])[0] - [0, half_spacing_between_arrows], xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C3\" if c == 0.02 else \"lightgrey\", \"linestyle\": \"--\", \"linewidth\":1, \"shrinkA\": 0, \"shrinkB\": 0})\n if c == 0.02:\n plt.annotate('', xytext=data_to_axis.transform([(max_fnr * 100, c * 100)])[0],\n textcoords='axes fraction',\n xy=[data_to_axis.transform([(max_fnr * 100, 1)])[0][0], 0],\n xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C2\" , \"linestyle\": \"--\",\n \"linewidth\": 1, \"shrinkA\": 0, \"shrinkB\": 0})\n plt.annotate('', xytext=data_to_axis.transform([(100 - max_fpr * 100, c * 100)])[0],\n textcoords='axes fraction',\n xy=[data_to_axis.transform([(100 - max_fpr * 100, 1)])[0][0], 0],\n xycoords='axes fraction',\n arrowprops={'arrowstyle': '-|>', 'color': \"C3\" , \"linestyle\": \"--\",\n \"linewidth\": 1, \"shrinkA\": 0, \"shrinkB\": 0})\n ticks_list = list(plt.xticks()[0])\n ticks_list.remove(60)\n plt.xticks(ticks_list + [max_fnr * 100, 100 - max_fpr * 100])\n\n p = patches.Rectangle((0,eer), 100, 100-eer, linewidth=0, fill=None, hatch='///', color='lightgrey') # data_to_axis.transform([(5.1 * 100, 0)])[0]\n plt.gca().add_patch(p)\n\n bbox_props = dict(boxstyle=\"rarrow\", fc=\"white\", ec=\"C0\", lw=1)\n plt.text(50, eer, \"Equal error rate\", ha=\"center\", va=\"center\", rotation=0,\n size=10,\n bbox=bbox_props)\n\n plt.text(50, 25, \"Above equal error rate: use 100% of automated classification\", size=10, rotation=0,\n ha=\"center\", va=\"center\",\n bbox=dict(boxstyle=\"round\",\n ec=\"white\",\n facecolor=\"white\",\n )\n )\n f.savefig('figures/' + path + 'bins.pdf',bbox_inches='tight', dpi=600)\n plt.close()\n" }, { "alpha_fraction": 0.6234956383705139, "alphanum_fraction": 0.6332118511199951, "avg_line_length": 47.43315505981445, "blob_id": "2198ca6e335becce07c6e5ebe37f6f024ba3b1cd", "content_id": "424053b39ed8dc81873bd2bf79c09a659b6b1818", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9057, "license_type": "no_license", "max_line_length": 128, "num_lines": 187, "path": "/evaluation_code_and_models/production_evaluation.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport argparse\nimport json\n\nimport matplotlib.ticker\nimport matplotlib.patches as patches\nimport pandas as pd\nimport numpy as np\nimport utils\n\nfrom sklearn.preprocessing import StandardScaler, Binarizer, LabelEncoder, LabelBinarizer, OneHotEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.impute import SimpleImputer\nfrom joblib import load\nimport itertools\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score, accuracy_score\nfrom evaluation.metrics import workReducedPostLoadThr\n\nimport dataprocessing.preprocessing as pre\nimport macroify\n\n# import bob.measure\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef saveimportance(importances, featurenames):\n df = pd.DataFrame({'featurename':featurenames, 'score':importances})\n df = df.sort_values('score',ascending=False)\n print(df)\n df.to_csv('dfs/importance1.csv')\n\nif __name__ == \"__main__\":\n parser = 
argparse.ArgumentParser(description='Do the avalanche experiments')\n\n parser.add_argument('--trainyear', '-tr',\n type=str,\n default='2017',\n help='year to consider')\n\n parser.add_argument('--testyear', '-te',\n type=str,\n default='2018',\n help='year to consider')\n\n args = parser.parse_args()\n\n testyear = args.testyear\n trainyear = args.trainyear\n results = {}\n y = utils.translateyear(trainyear)\n z = utils.translateyear(testyear)\n\n available, reputation, dns, whois, openintel, label = pre.loadAndCleanDataPerDataSet(False, testyear)\n\n total_fp = 0\n total_fn = 0\n total_manual = 0\n total_pred = 0\n total_amount_of_domains = len(available.index)\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_positive = labelzsss.sum()\n total_amount_negative = len(labelzsss.index) - labelzsss.sum()\n l = [False,True]\n dfs = []\n codesz = []\n ensemble_scores_pos = []\n ensemble_scores_neg = []\n ensemble_predictions = []\n ensemble_predictions_priori = []\n ensemble_labels_priori = []\n ensemble_labels = []\n metrics = { 'f1': [], 'precision': [], 'recall': [], 'acc_train': [], 'acc_test': [], 'eer': [], 'fnr_work_reduced': [],\n 'fpr_work_reduced': [], 'work_reduced': [], 'work_reduced_negative': [], 'work_reduced_positive': []}\n for x in itertools.product(l,repeat=4):\n code = ''.join(['1' if i else '0' for i in x])\n if code != '0000': # code[0] != '0'\n clf = load('models/' + trainyear + '/model' + code + '.joblib')\n features_maxdata, labelzz_max_data, _ = pre.loadAndCleanDataMaxDom(code, False, testyear)\n\n # Evaluate model performance on max domains\n predictions = clf.predict(features_maxdata)\n metrics['acc_test'].append(accuracy_score(labelzz_max_data, predictions))\n metrics['f1'].append(f1_score(labelzz_max_data, predictions))\n metrics['precision'].append(precision_score(labelzz_max_data, predictions))\n metrics['recall'].append(recall_score(labelzz_max_data, predictions))\n\n # Evaluate model performance work reduced\n scores = clf.predict_proba(features_maxdata)\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, _, _ = workReducedPostLoadThr(trainyear, code, scores, labelzz_max_data)\n metrics['work_reduced_negative'].append(work_reduced_negative)\n metrics['work_reduced_positive'].append(work_reduced_positive)\n metrics['work_reduced'].append(work_reduced)\n metrics['fnr_work_reduced'].append(fnr)\n metrics['fpr_work_reduced'].append(fpr)\n\n # Construct domains that should be classified by this model\n features, labelzz = pre.loadAndCleanDataExactPattern(x, available, reputation, dns, whois, openintel, label)\n amount_of_domains = len(features.index)\n codesz.append(code)\n print(amount_of_domains, 'domains to classify for sourcepattern', code)\n if len(labelzz.index != 0):\n print('With', amount_of_domains-labelzz.sum(), 'negative domains and', labelzz.sum(), 'positive domains')\n\n index = features.index\n scores = clf.predict_proba(features)\n predictions = clf.predict(features)\n df = pd.DataFrame(list(zip(predictions, scores[:,1], len(predictions)*[code])),\n index=features.index, columns=['classification 0=benign, 1=malicious', 'score', 'model code'])\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, _, _ = workReducedPostLoadThr(trainyear, code, scores, labelzz)\n\n total_fp += (len(positive_pred) - sum(positive_pred))\n total_fn += sum(negative_pred)\n 
total_manual += len(no_action_pred)\n total_pred += (len(positive_pred) + len(negative_pred))\n\n ensemble_predictions = ensemble_predictions + [1]*len(positive_pred) + [0]*len(negative_pred) + no_action_pred\n ensemble_labels = ensemble_labels + positive_pred + negative_pred + no_action_pred\n\n ensemble_predictions_priori = ensemble_predictions_priori + predictions.tolist()\n ensemble_labels_priori = ensemble_labels_priori + labelzz.values.tolist()\n\n dfs.append(df)\n\n ensemble_scores_neg = ensemble_scores_neg + scores[:, 1][labelzz == 0].tolist()\n ensemble_scores_pos = ensemble_scores_pos + scores[:, 1][labelzz == 1].tolist()\n\n print('Makes a prediction for', (len(positive_pred) + len(negative_pred)), 'domains')\n print('Would predict', np.sum(predictions), 'domains malicious')\n\n # Save predictions\n df = pd.concat(dfs)\n print(len(df.index),\" predictions made\")\n df.to_csv('dfs/predictions.csv')\n\n # Print performance per model\n print('===============================================================================')\n for key, value in metrics.items():\n if value:\n print('========== %s ============' % (key))\n for i,v in enumerate(value):\n print('Model %s: %.3f' % (codesz[i], v))\n # codestr = utils.translatecode(code)\n # results[y+z+key + codestr] = v\n print('===============================================================================')\n\n print('Total work reduced', (total_amount_of_domains-total_manual)/total_amount_of_domains)\n print('Total FNR', total_fn/total_amount_positive)\n print('Total FPR', total_fp/total_amount_negative)\n\n print('Accuracy', accuracy_score(ensemble_labels, ensemble_predictions))\n print('F1', f1_score(ensemble_labels, ensemble_predictions))\n print('Precision', precision_score(ensemble_labels, ensemble_predictions))\n print('Recall', recall_score(ensemble_labels, ensemble_predictions))\n\n print('Little check', total_amount_positive+total_amount_negative == total_amount_of_domains)\n print('Little check', total_pred+total_manual == total_amount_of_domains)\n\n results[y+z+'workreduced'+ 'posteriori'] = (total_amount_of_domains-total_manual)/total_amount_of_domains *100\n results[y+z+'fnr'+ 'posteriori'] = total_fn/total_amount_positive *100\n results[y+z+'fpr'+ 'posteriori'] = total_fp/total_amount_negative *100\n results[y+z+'accuracy'+ 'posteriori'] = accuracy_score(ensemble_labels, ensemble_predictions) *100\n results[y+z+'fone'+ 'posteriori'] = f1_score(ensemble_labels, ensemble_predictions) *100\n results[y+z+'precision'+ 'posteriori'] = precision_score(ensemble_labels, ensemble_predictions) *100\n results[y+z+'recall'+ 'posteriori'] = recall_score(ensemble_labels, ensemble_predictions) *100\n\n results[y + z + 'accuracy'] = accuracy_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[y + z + 'fone'] = f1_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[y + z + 'precision'] = precision_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[y + z + 'recall'] = recall_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[y + z + 'eer'] = bob.measure.eer(ensemble_scores_neg, ensemble_scores_pos) * 100\n # fpr, fnr = bob.measure.farfrr(ensemble_scores_neg, ensemble_scores_pos, 0.5)\n results[y + z + 'fpr'] = fpr*100\n results[y + z + 'fnr'] = fnr*100\n\n macroify.append_file(results)\n\n print('Little check 2', len(ensemble_scores_neg) + len(ensemble_scores_pos) == total_amount_of_domains)\n\n np.savez('dfs/' + trainyear + '_' + testyear + 
'ensemble_det_curve.npz', pos=ensemble_scores_pos, neg=ensemble_scores_neg)\n" }, { "alpha_fraction": 0.5988538861274719, "alphanum_fraction": 0.6075931191444397, "avg_line_length": 46.482994079589844, "blob_id": "826124c5f320c42190f22644bf1a3050582c2163", "content_id": "e9b98c917576b269dc65f68bd60bce11517c3d82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6980, "license_type": "no_license", "max_line_length": 136, "num_lines": 147, "path": "/evaluation_code_and_models/experiment.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import argparse\nimport json\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split, KFold, StratifiedKFold\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score\n\nfrom joblib import dump, load\nimport evaluation.metrics as m\nimport evaluation.postanalysis as postan\nimport evaluation.preanalysis as prean\n\nfrom dataprocessing.preprocessing import loadAndCleanDataMaxDom\n\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n'''This script runs the experiments within one year. This allows to compute the estimated total work reduced'''\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Do the avalanche experiments within one year')\n\n parser.add_argument('--sources', '-s',\n type=str,\n default=\"0111\",\n help='what datasets to use in a binary pattern, reputation + lexicographic, passivedns, whois, activedns')\n\n parser.add_argument('--year', '-y',\n type=str,\n default='2017',\n help='year to consider')\n\n args = parser.parse_args()\n sourcepattern = args.sources\n year = args.year\n path = year + '/' + sourcepattern + '_'\n\n features, labels, post_analysis_labels = loadAndCleanDataMaxDom(sourcepattern, False, year)\n\n prean.pcaAnalysis(features,labels, path)\n prean.covMatrix(features, labels, path)\n\n print(\"Input sizes:\")\n print(\"Total\", len(labels), \"Negative (0, benign)\", (labels == 0).sum(), \"Positive (1, malicious)\", (labels == 1).sum())\n\n # pipeline\n i = 1\n kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=44)\n metrics = {'f1': [], 'precision': [], 'recall': [], 'auc': [], 'acc_train': [], 'acc_test': [], 'eer': []}\n data = {'x_test': np.empty((0, features.shape[1])), 'y_test': np.empty((0,)), 'y_pred': np.empty((0,)),\n 'importance': np.zeros(len(features.columns)), 'agg_scores_train': [], 'agg_scores_test': [],\n 'labels_train': [], 'labels_test': [], 'estimators':[], 'y_post': np.empty((0, post_analysis_labels.shape[1])),\n 'domainname_test':[]}\n for train_index, test_index in kf.split(features.values, labels):\n # Split the training and testing data\n X_train, X_test = features.values[train_index], features.values[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n y_post = post_analysis_labels.iloc[test_index].values\n domainname_test = features.index[test_index]\n\n # Load parameters of the hyperparameter tuned model. Note that we do not tune in each iteration.\n # It is possible that tuning within the split leads to other hyperparameters, however, the hyperparameters\n # should transfer quite well as the problem and data remains the same. 
At worst performance could be slightly better.\n clf_tuned = load('models/2017/model' + sourcepattern + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n pipe = Pipeline([('clf', GradientBoostingClassifier(**params))]) # ('scl', StandardScaler()),\n\n else:\n params = clf_tuned.best_params_\n pipe = Pipeline([('clf', GradientBoostingClassifier(random_state=44, **params))]) #('scl', StandardScaler()),\n\n # Train the model\n pipe.fit(X_train, y_train)\n\n # Calculate metrics for this split\n metrics['acc_train'].append(pipe.score(X_train, y_train))\n metrics['acc_test'].append(pipe.score(X_test, y_test))\n y_pred = pipe.predict(X_test)\n metrics['f1'].append(f1_score(y_test, y_pred))\n metrics['auc'].append(roc_auc_score(y_test, y_pred))\n metrics['precision'].append(precision_score(y_test, y_pred))\n metrics['recall'].append(recall_score(y_test, y_pred))\n\n # Some post processing information for this split\n data['x_test'] = np.append(data['x_test'], X_test, axis=0)\n data['y_test'] = np.append(data['y_test'], y_test)\n data['y_pred'] = np.append(data['y_pred'], y_pred)\n data['y_post'] = np.append(data['y_post'], y_post, axis=0)\n data['importance'] = np.sum([data['importance'], pipe.named_steps['clf'].feature_importances_], axis=0)\n data['estimators'].append(pipe.named_steps['clf'].estimators_)\n data['domainname_test'] = np.append(data['domainname_test'], domainname_test)\n\n malicious = X_test[y_test == 1]\n benign = X_test[y_test == 0]\n negatives = pipe.predict_proba(benign)[:, 1]\n positives = pipe.predict_proba(malicious)[:, 1]\n scores_test = pipe.predict_proba(X_test)[:, 1]\n scores_train = pipe.predict_proba(X_train)[:, 1]\n\n data['agg_scores_train'] = np.append(data['agg_scores_train'], scores_train)\n data['agg_scores_test'] = np.append(data['agg_scores_test'], scores_test)\n data['labels_train'] = np.append(data['labels_train'], y_train)\n data['labels_test'] = np.append(data['labels_test'], y_test)\n \n ind = []\n mean = []\n std = []\n print('===============================================================================')\n for key, value in metrics.items():\n if value:\n print('GBC pipeline test %s and std: %.3f +- %.3f' % (key, np.array(value).mean(), np.array(value).std()))\n ind.append(key)\n mean.append(np.array(value).mean())\n std.append(np.array(value).std())\n print('===============================================================================')\n df = pd.DataFrame({'mean': mean, 'std': std}, index=ind)\n df.to_csv('dfs/' + year + '/' + sourcepattern + '_' + 'performance_metrics.csv')\n\n costs = [0.001, 0.005, 0.01, 0.02]\n\n metricsfnr, metricsfpr = m.workreduced(data['agg_scores_test'], data['labels_test'], costs, plot= True, savemetrics=True, path=path)\n\n postan.saveFpFnDf(data['x_test'], data['y_test'], data['y_pred'], features.columns, data['domainname_test'], path)\n postan.saveimportance(data['importance'] / kf.n_splits, features.columns, path)\n\n for c, vfnr, vfpr in zip(costs, metricsfnr, metricsfpr):\n print('Testing: When a fnr and fpr of', c*100 , '% is acceptable, work saved is', vfnr , vfpr ,\n 'total', vfnr + vfpr )\n\n print('===============================================================================')\n\n distributions = dict()\n for name in features.columns:\n distributions[name] = []\n for estims in data['estimators']:\n postan.featureDistribution(features.columns, estims, distributions)\n\n with open('dfs/' + year + '/' + sourcepattern + '_' + 'thresholds.json', 'w') as fp:\n 
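# Persist the per-feature threshold distributions collected across the folds\n        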
json.dump(distributions, fp)\n" }, { "alpha_fraction": 0.580809473991394, "alphanum_fraction": 0.6018367409706116, "avg_line_length": 43.98194122314453, "blob_id": "5f30db8bb8f476562a8db5c67af9c8dec083e7b7", "content_id": "4f811b81e627f1c6fd1f867958cc57b0246191f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 39853, "license_type": "no_license", "max_line_length": 263, "num_lines": 886, "path": "/feature_generation/features.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import csv\nimport datetime\nimport json\nimport os\nimport traceback\n\nimport dateutil.parser as dateparser\n\n# Reminder: this only applies to already registered domains, e.g. these domains would have to be seized if they turn out\n# to be malicious.\nimport feature_generation.retrieve_sinkhole_data as retrieve_sinkhole_data\n\nfamilies = ['Andromeda', 'Bolek', 'Citadel', 'CoreBot', 'Gozi2', 'Goznym', 'Goznym Stage 1', 'KINS', 'MS-Andromeda',\n 'Marcher', 'Matsnu', 'Nymaim', 'Pandabanker', 'Ranbyus', 'Rovnix', 'Smart App', 'Smoke Loader / Dofoil',\n 'TeslaCrypt', 'Tiny Banker', 'Trusteer App', 'Unknown', 'UrlZone', 'Vawtrak', 'Xswkit']\n# source: DGArchive https://dgarchive.caad.fkie.fraunhofer.de/site/families.html with manual corrections of family names\nmalware_family_validities = {'CoreBot': '2015-01-01',\n 'Gozi2': '2010-01-01', # 'Gozi (Days+Monthly+Seasonal)': '2010-01-01', cf. https://malpedia.caad.fkie.fraunhofer.de/details/win.isfb\n 'Goznym': '2016-01-01', 'Goznym Stage 1': '2016-01-01', # 'GozNym 2nd Stage': '2016-01-01',\n 'Matsnu': '2014-01-01', 'Nymaim': '2014-01-01', 'PandaBanker': '2016-08-01', 'Ranbyus': '2015-01-01', 'Rovnix': '2015-01-01',\n 'Tiny Banker': '2014-01-01', 'UrlZone': '2014-01-01', 'Vawtrak': '2016-01-01',\n\n 'Bobax': '2008-01-01', 'BeeBone': None, 'Blackhole': '2012-06-01', 'Bedep': '2015-01-01',\n 'Banjori': '2013-01-01', 'Bamital': '2010-11-01', 'Cryptolocker': '2013-01-01',\n 'CCleaner DGA': '2017-01-01', 'Conficker': '2008-11-01',\n 'Chinad': None, 'Chir': '2011-01-01', 'Darkshell': None, 'Dyre': '2014-01-01',\n 'DNS Changer': '2011-01-01', 'DiamondFox': '2015-01-01', 'DirCrypt': '2013-01-01',\n 'Emotet.C': '2014-10-01', 'EKforward': '2014-01-01', 'Feodo': '2012-02-01',\n 'Fobber': '2015-01-01',\n 'Gameover P2P': '2011-01-01', 'Gameover DGA': '2014-01-01',\n 'Gspy': None, 'Hesperbot': '2013-01-01', 'Infy': '2015-01-01', 'Locky': '2016-01-01',\n 'ModPack (Andromeda?)': '2016-01-01', 'Murofet': '2010-01-01',\n 'Mirai': '2016-12-01', 'MadMax DGA': '2015-01-01', 'Necurs': '2013-01-01',\n 'Omexo': None, 'Oderoor': '2013-01-01', 'Pushdo (TID version)': '2011-01-01',\n 'Pykspa 2': '2013-04-01', 'Proslikefan DGA': '2016-01-01',\n 'Pykspa': '2009-10-01', 'Pushdo': '2013-01-01', 'PadCrypt': '2016-01-01', 'QakBot': '2013-01-01',\n 'Qadars': '2016-01-01', 'Ramdo': '2013-01-01', 'Redyms': '2012-01-01',\n 'Ramnit': '2012-01-01', 'Symmi': '2014-01-01', 'SuppoBox': '2013-01-01',\n 'Sisron': '2013-01-01', 'Sphinx Zeus DGA': '2016-09-01', 'Szribi': '2007-01-01', 'Shifu': '2015-01-01',\n 'Sutra TDS': '2012-01-01', 'Simda': '2012-01-01', 'Tsifiri': None, 'Tempedreve': '2014-01-01',\n 'Tempedreve TDD': '2015-01-01', 'Torpig': '2007-01-01',\n 'Tofsee DGA': '2016-01-01', 'UD4': '2016-01-01', 'VolatileCedar': '2014-01-01',\n 'Vidro(TID)': None, 'Virut': '2011-08-01', 'WD': '2017-01-01',\n 'XxHex DGA': '2016-01-01'}\n\nwordlist_families = {\"Matsnu\", \"Gozi2\", \"SuppoBox\", # 
https://dgarchive.caad.fkie.fraunhofer.de/site/families.html, paper Plohmann (TDD-W type)\n \"Banjori\", \"Rovnix\", # https://arxiv.org/abs/1810.02023 (high 'smashword' score), in addition to previous sources\n \"Pizd\", # https://osint.bambenekconsulting.com/feeds/pizd-domlist.txt\n }\nshadowserver_sinkholes_ns = [\"ns1.kryptoslogicsinkhole.com\", \"ns2.kryptoslogicsinkhole.net\", \"ns3.kryptoslogicsinkhole.org\", \"ns4.kryptoslogicsinkhole.me\",\n\"b66.microsoftinternetsafety.net\", \"b67.microsoftinternetsafety.net\",\n'ns1.markmonitor.com', 'ns2.markmonitor.com', 'ns3.markmonitor.com', 'ns4.markmonitor.com', 'ns5.markmonitor.com', 'ns6.markmonitor.com', 'ns7.markmonitor.com',\n'ns1.i56a4c1dlzcdsohkwr.biz', 'ns2.i56a4c1dlzcdsohkwr.biz', 'ns3.i56a4c1dlzcdsohkwr.biz', 'ns4.i56a4c1dlzcdsohkwr.biz',\n\"ns1.honeybot.us\", \"ns2.honeybot.us\",\n\"sc-a.sinkhole.shadowserver.org\", \"sc-b.sinkhole.shadowserver.org\", \"sc-c.sinkhole.shadowserver.org\", \"sc-d.sinkhole.shadowserver.org\",\n'ns1.csof.net', 'ns2.csof.net', 'ns3.csof.net', 'ns4.csof.net',\n\"ns1.arbors1nkh0le.com\", \"ns1.arbor-sinkhole.net\", \"ns2.arbor-sinkhole.net\", \"ns1.asertdns.com\", \"ns2.asertdns.com\"]\nshadowserver_sinkholes_a = [\"82.112.184.197\"]\n\ninitialized = False\nauxiliary_data = {}\n\ndef initialize(formatted_snapshot_date):\n auxiliary_data[formatted_snapshot_date] = {}\n\n if os.path.exists(\"input_data/{}/disposable_email_addresses_exact.json\".format(formatted_snapshot_date)):\n with open(\"input_data/{}/disposable_email_addresses_exact.json\".format(formatted_snapshot_date)) as tempmail_exact_json:\n tempmail_exact = json.load(tempmail_exact_json)\n with open(\"input_data/{}/disposable_email_addresses_wildcard.json\".format(formatted_snapshot_date)) as tempmail_wildcard_json:\n tempmail_wildcard = json.load(tempmail_wildcard_json)\n tempmail_data = (tempmail_exact, tempmail_wildcard)\n else:\n tempmail_data = None\n\n auxiliary_data[formatted_snapshot_date][\"tempmail_data\"] = tempmail_data\n\n with open(\"input_data/{}/sinkhole_results.csv\".format(formatted_snapshot_date)) as sinkhole_csv:\n sinkhole_csvr = csv.reader(sinkhole_csv)\n auxiliary_data[formatted_snapshot_date][\"sinkhole_data\"] = {r[0]: True if r[1] == \"True\" else (False if r[1] == \"False\" else None) for r in sinkhole_csvr}\n\n with open(\"input_data/{}/wayback_results_domain.csv\".format(formatted_snapshot_date)) as wayback_domain_csv:\n wayback_domain_csvr = csv.reader(wayback_domain_csv)\n auxiliary_data[formatted_snapshot_date][\"wayback_domain_data\"] = {r[0]: r[1:] for r in wayback_domain_csvr}\n\n with open(\"input_data/{}/ct_results.txt\".format(formatted_snapshot_date)) as ct_csv:\n ct_csvr = csv.reader(ct_csv)\n auxiliary_data[formatted_snapshot_date][\"ct_data\"] = {r[0]: r[1:] for r in ct_csvr}\n\n if os.path.exists(\"input_data/{}/openintel_results.csv\".format(formatted_snapshot_date)):\n with open(\"input_data/{}/openintel_results.csv\".format(formatted_snapshot_date)) as openintel_csv:\n openintel_csvr = csv.reader(openintel_csv)\n auxiliary_data[formatted_snapshot_date][\"openintel_data\"] = {r[0]: r[1:] for r in openintel_csvr}\n else:\n auxiliary_data[formatted_snapshot_date][\"openintel_data\"] = {}\n\n\nopenintel_cap = 333 # nb days between 1 Jan and 29 Nov (inclusive)\n\n\nclass FeatureSet:\n @classmethod\n def get_feature_names(cls):\n return [func[2:] for func in dir(cls) if callable(getattr(cls, func)) and func.startswith(\"f_\")]\n\n def __init__(self, domain, snapshot_date, malware_data, pdns_data, 
whois_data, topsites_data, suffix_data, renewal_data, whois_validity_data, wordlist_based_data):#, adns_data):\n self.domain = domain\n self.snapshot_date = snapshot_date\n self.formatted_snapshot_date = snapshot_date.strftime(\"%Y%m%d\")\n\n if self.formatted_snapshot_date not in auxiliary_data:\n initialize(self.formatted_snapshot_date)\n\n self.malware_data = malware_data\n self.pdns_data = pdns_data\n self.whois_data = whois_data\n self.topsites_data = topsites_data\n self.suffix_data = suffix_data\n self.renewal_data = renewal_data\n self.whois_validity_data = whois_validity_data\n self.wordlist_based_data = wordlist_based_data\n self.adns_data = [self.domain] + auxiliary_data[self.formatted_snapshot_date][\"openintel_data\"][self.domain] if self.domain in auxiliary_data[self.formatted_snapshot_date][\"openintel_data\"] else None\n self.features = {}\n\n def check_datasets(self, abridged=True):\n if abridged:\n datasets_to_check = [self.pdns_data, self.whois_data,\n self.renewal_data, self.whois_validity_data,\n self.adns_data]\n else:\n datasets_to_check = [self.pdns_data, self.whois_data,\n self.topsites_data[\"alexa\"], self.topsites_data[\"umbrella\"],\n self.topsites_data[\"majestic\"], self.topsites_data[\"quantcast\"], self.suffix_data,\n self.renewal_data, self.whois_validity_data,\n self.wordlist_based_data,\n auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"].get(self.domain, None),\n auxiliary_data[self.formatted_snapshot_date][\"ct_data\"].get(self.domain, None),\n self.adns_data]\n\n result = [not dataset for dataset in datasets_to_check]\n return result\n\n def export(self):\n return [self.features[k] for k in FeatureSet.get_feature_names()]\n\n def generate_feature(self):\n for feature_name in FeatureSet.get_feature_names():\n self.features[feature_name] = getattr(FeatureSet, \"f_\" + feature_name)(self)\n\n def f_domain(self):\n return self.domain\n\n ### Malware-based features ###\n\n def f_malware_family(self):\n \"\"\"\n Type: categorical\n Indicates the family of malware that generated the DGA domain.\n Intuition: Some DGAs generate random strings, while others concatenate words from a wordlist. There is a higher\n chance that the latter collides with a benign domain.\n :return:\n \"\"\"\n return self.malware_data[1]\n\n def f_malware_validity_start(self):\n \"\"\"\n Type: numeric\n Start of validity of the AGD. (only for post-analysis)\n :return:\n \"\"\"\n return dateparser.parse(self.malware_data[2]).timestamp()\n\n def f_malware_validity_end(self):\n \"\"\"\n Type: numeric\n End of validity of the AGD. (only for post-analysis)\n :return:\n \"\"\"\n return dateparser.parse(self.malware_data[3]).timestamp()\n\n def f_malware_validity_length(self):\n \"\"\"\n Type: numeric\n Length in days of the period of validity of the AGD.\n Intuition: An AGD that is valid for a short period of time is potentially less likely to be registered by the\n malicious party upfront.\n :return:\n \"\"\"\n return (dateparser.parse(self.malware_data[3]) - dateparser.parse(self.malware_data[2])).days + 1\n\n def f_whois_registration_date(self):\n \"\"\"\n Type: date\n Creation date of the domain. 
Do not use in model.\n :return:\n \"\"\"\n if not self._is_whois_available(\"created_date\"):\n return None\n try:\n return self._parse_whois_date(self.whois_data[\"created_date\"]).timestamp()\n except:\n return None\n\n def f_whois_registration_and_family_start_date(self):\n \"\"\"\n Type: numeric\n Difference between start date of malware and creation date of the domain.\n Intuition: Sites with registration dates a long time\n before the malware started operating could be more likely to be benign.\n :return:\n \"\"\"\n family = self.malware_data[1]\n if family not in malware_family_validities or not malware_family_validities[family]:\n return None\n else:\n if not self._is_whois_available(\"created_date\"):\n return None\n try:\n return (self._parse_whois_date(self.whois_data[\"created_date\"]) -\n datetime.datetime.strptime(malware_family_validities[family], \"%Y-%m-%d\")).days\n except:\n return None\n\n def f_whois_registration_and_validity_start_date(self):\n \"\"\"\n Type: numeric\n Difference between start date of validity of the AGD and creation date of the domain.\n Intuition: Combining with the registration date of the AGD, sites with registration dates a long time\n before the validity of the AGD could be more likely to be benign.\n :return:\n \"\"\"\n if not self._is_whois_available(\"created_date\"):\n return None\n try:\n return (self._parse_whois_date(self.whois_data[\"created_date\"]) -\n dateparser.parse(self.malware_data[2])).days\n except:\n return None\n\n def f_malware_wordlist_based_dga(self):\n \"\"\"\n Type: categorical\n Indicates whether the DGA uses a wordlist to generate domains.\n Intuition: AGDs based on wordlists can resemble regular phrases and are therefore more likely to collide with legitimate domains.\n :return:\n \"\"\"\n if not self.wordlist_based_data:\n return None\n return self.wordlist_based_data[1] == \"True\" # self.malware_data[1] in wordlist_families\n\n ### Domain name features ###\n\n def f_domain_length(self):\n \"\"\"\n Type: numeric\n Length of the domain (without the suffix).\n Intuition: Shorter domains have a higher chance of collision with a benign domain.\n Source: FANCI; PREDATOR; Liu2017CCS; ?\n :return:\n \"\"\"\n if not self.suffix_data:\n return None\n return len(self.suffix_data[5] + self.suffix_data[4])\n\n def f_domain_digit_ratio(self):\n \"\"\"\n Type: numeric\n Proportion of digits over all characters (for the domain without the suffix).\n Intuition: Malicious domains / AGDs are more likely to contain digits.\n Source: EXPOSURE < ? 
; FANCI\n :return:\n \"\"\"\n if not self.suffix_data:\n return None\n return sum(list(map(lambda x: 1 if x.isdigit() else 0, self.suffix_data[5] + self.suffix_data[4])))/len(self.suffix_data[5] + self.suffix_data[4])\n\n ### DNS features ###\n\n def f_known_sinkhole(self):\n \"\"\"\n Type: categorical (sinkdb|email|stamparm|none)\n Indicates whether the domain belongs to a known sinkhole (Evaluation Scheme - 4).\n Based on: A record + listing in SinkDB, whois email, A record + listing in Miroslav Stampar's sinkhole list.\n Intuition: Sinkholed domains shouldn't be seized.\n :return:\n \"\"\"\n try:\n if self.pdns_data and self.pdns_data[4] and any(auxiliary_data[self.formatted_snapshot_date][\"sinkhole_data\"].get(ip_address, False) for ip_address in eval(self.pdns_data[4])): # A records\n result = \"dns_a_sinkdb\"\n elif self.pdns_data and self.pdns_data[4] and any(ip_address in shadowserver_sinkholes_a for ip_address in eval(self.pdns_data[4])):\n result = \"dns_a_shadowserver\"\n elif self.pdns_data and self.pdns_data[5] and any(ns.strip(\".\") in shadowserver_sinkholes_ns for ns in eval(self.pdns_data[5])):\n result = \"dns_ns_stamparm\"\n elif self._is_whois_available(\"nameserver\") and any(\n ns.strip(\".\") in shadowserver_sinkholes_ns for ns in (\n eval(self.whois_data[\"nameserver\"]) if self.whois_data[\"nameserver\"].startswith(\"[\") else [\n self.whois_data[\"nameserver\"]])):\n result = \"whois_ns_stamparm\"\n elif self.pdns_data and self.pdns_data[4] and any(retrieve_sinkhole_data.check_against_stamparm_ip(ip_address) for ip_address in eval(self.pdns_data[4])):\n result = \"dns_a_stamparm\"\n elif self.pdns_data and self.pdns_data[5] and any(retrieve_sinkhole_data.check_against_stamparm_ns(ns.strip(\".\")) for ns in eval(self.pdns_data[5])):\n result = \"dns_ns_stamparm\"\n elif self._is_whois_available(\"nameserver\") and any(retrieve_sinkhole_data.check_against_stamparm_ns(ns.strip(\".\")) for ns in (eval(self.whois_data[\"nameserver\"]) if self.whois_data[\"nameserver\"].startswith(\"[\") else [self.whois_data[\"nameserver\"]])):\n result = \"whois_ns_stamparm\"\n elif self.pdns_data and self.pdns_data[5] and any(\n retrieve_sinkhole_data.check_against_alowaisheq_ns(ns.strip(\".\")) for ns in eval(self.pdns_data[5])):\n result = \"dns_ns_alowaisheq\"\n elif self._is_whois_available(\"nameserver\") and any(\n retrieve_sinkhole_data.check_against_alowaisheq_ns(ns.strip(\".\")) for ns in (\n eval(self.whois_data[\"nameserver\"]) if self.whois_data[\"nameserver\"].startswith(\"[\") else [\n self.whois_data[\"nameserver\"]])):\n result = \"whois_ns_alowaisheq\"\n elif self._is_whois_available(\"reg_email\") and retrieve_sinkhole_data.check_against_sinkhole_emails(self.whois_data[\"reg_email\"]):\n result = \"whois_email\"\n else:\n result = None\n return result\n except:\n traceback.print_exc()\n return None\n\n def f_dnsdb_available(self):\n return self.pdns_data is not None\n\n def f_dnsdb_nb_queries(self):\n \"\"\"\n Type: numeric\n Number of DNS queries observed for the domain. (from DNSDB)\n Intuition: Benign sites will actually receive (more) queries.\n Source: Lison2017BIGDATA\n :return:\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[3]\n\n def f_dnsdb_active_period(self):\n \"\"\"\n Type: numeric\n Time between last seen query and first seen query. 
(from DNSDB)\n Intuition: Sites active for longer are more likely to be benign.\n :return:\n \"\"\"\n if not self.pdns_data:\n return None\n return (datetime.datetime.strptime(self.pdns_data[2], \"%Y-%m-%d %H:%M:%S\") - datetime.datetime.strptime(self.pdns_data[1], \"%Y-%m-%d %H:%M:%S\")).seconds\n\n def f_dnsdbwhois_first_seen_after_registration(self):\n \"\"\"\n Type: numeric\n Time between first seen query and domain creation date. (from DNSDB + WHOIS)\n Intuition: Sites active quickly after registration are less likely to be dormant malicious domains.\n :return:\n \"\"\"\n if not self.pdns_data or not self._is_whois_available(\"created_date\"):\n return None\n return (datetime.datetime.strptime(self.pdns_data[1], \"%Y-%m-%d %H:%M:%S\") - self._parse_whois_date(self.whois_data[\"created_date\"])).seconds\n\n def f_dnsdb_first_seen_before_validity(self):\n \"\"\"\n Type: numeric\n Time between first seen query and AGD validity date. (from DNSDB)\n Intuition: Sites registered a long time before validity are more likely to be benign.\n :return:\n \"\"\"\n if not self.pdns_data:\n return None\n return (dateparser.parse(self.malware_data[2]) - datetime.datetime.strptime(self.pdns_data[1], \"%Y-%m-%d %H:%M:%S\")).seconds\n # return (datetime.datetime.strptime(self.malware_data[2], \"%Y-%m-%d %H:%M:%S\") - datetime.datetime.strptime(self.pdns_data[1], \"%Y-%m-%d %H:%M:%S\")).seconds\n\n def f_dnsdb_first_seen_before_now(self):\n \"\"\"\n Type: numeric\n Time between first seen query and domain creation date. (from DNSDB + WHOIS)\n Intuition: Sites active quickly after registration are less likely to be dormant malicious domains.\n :return:\n \"\"\"\n if not self.pdns_data:\n return None\n return (self.snapshot_date - datetime.datetime.strptime(self.pdns_data[1], \"%Y-%m-%d %H:%M:%S\")).seconds\n\n def f_dnsdb_record_A(self):\n \"\"\"\n Type: categorical (true|false)\n Record type A seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[6]\n\n def f_dnsdb_record_AAAA(self):\n \"\"\"\n Type: categorical (true|false)\n Record type AAAA seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[7]\n\n def f_dnsdb_record_CAA(self):\n \"\"\"\n Type: categorical (true|false)\n Record type CAA seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[8]\n\n def f_dnsdb_record_CNAME(self):\n \"\"\"\n Type: categorical (true|false)\n Record type CNAME seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[9]\n\n def f_dnsdb_record_HINFO(self):\n \"\"\"\n Type: categorical (true|false)\n Record type HINFO seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[10]\n\n def f_dnsdb_record_MX(self):\n \"\"\"\n Type: categorical (true|false)\n Record type MX seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not 
self.pdns_data:\n return None\n return self.pdns_data[11]\n\n def f_dnsdb_record_NS(self):\n \"\"\"\n Type: categorical (true|false)\n Record type NS seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[12]\n\n def f_dnsdb_record_PTR(self):\n \"\"\"\n Type: categorical (true|false)\n Record type PTR seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[13]\n\n def f_dnsdb_record_RP(self):\n \"\"\"\n Type: categorical (true|false)\n Record type RP seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[14]\n\n def f_dnsdb_record_SOA(self):\n \"\"\"\n Type: categorical (true|false)\n Record type SOA seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[15]\n\n def f_dnsdb_record_SPF(self):\n \"\"\"\n Type: categorical (true|false)\n Record type SPF seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[16]\n\n def f_dnsdb_record_TXT(self):\n \"\"\"\n Type: categorical (true|false)\n Record type TXT seen on this domain (from DNSDB).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.pdns_data:\n return None\n return self.pdns_data[17]\n\n def f_openintel_available(self):\n return self.adns_data is not None\n\n def f_openintel_first_seen_before_now(self):\n \"\"\"\n Type: numeric\n Time between last seen query and first seen query. (from OpenIntel)\n Intuition: Sites active for longer are more likely to be benign.\n :return:\n \"\"\"\n if not self.adns_data:\n return None\n return ( min(openintel_cap,\n max(int(self.adns_data[1]) if self.adns_data[1] else 0,\n int(self.adns_data[2]) if self.adns_data[2] else 0,\n int(self.adns_data[3]) if self.adns_data[3] else 0,\n int(self.adns_data[4]) if self.adns_data[4] else 0,\n int(self.adns_data[15] if self.adns_data[15] else 0))\n ))\n\n def f_openintel_first_seen_before_validity(self):\n \"\"\"\n Type: numeric\n Time between last seen query and first seen query. 
(from OpenIntel)\n Intuition: Sites active for longer are more likely to be benign.\n :return:\n \"\"\"\n if not self.adns_data:\n return None\n if not self.adns_data[1] and not self.adns_data[2] and not self.adns_data[3] and not self.adns_data[4] and not self.adns_data[15]:\n return 0\n return (min(openintel_cap,\n max(int(self.adns_data[1]) if self.adns_data[1] else 0,\n int(self.adns_data[2]) if self.adns_data[2] else 0,\n int(self.adns_data[3]) if self.adns_data[3] else 0,\n int(self.adns_data[4]) if self.adns_data[4] else 0,\n int(self.adns_data[15] if self.adns_data[15] else 0))) +\n (dateparser.parse(self.malware_data[2]) - self.snapshot_date).days)\n\n def f_openintel_nb_days_seen_A(self):\n \"\"\"\n Type: numeric\n Record type A seen on this domain (from OpenIntel).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.adns_data:\n return None\n return min(openintel_cap,int(self.adns_data[5])) if self.adns_data[5] else 0\n\n def f_openintel_nb_days_seen_AAAA(self):\n \"\"\"\n Type: numeric\n Record type AAAA seen on this domain (from OpenIntel).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.adns_data:\n return None\n return min(openintel_cap,int(self.adns_data[6])) if self.adns_data[6] else 0\n\n def f_openintel_nb_days_seen_MX(self):\n \"\"\"\n Type: numeric\n Record type MX seen on this domain (from OpenIntel).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.adns_data:\n return None\n return min(openintel_cap,int(self.adns_data[8])) if self.adns_data[8] else 0\n\n def f_openintel_nb_days_seen_NS(self):\n \"\"\"\n Type: numeric\n Record type NS seen on this domain (from OpenIntel).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.adns_data:\n return None\n return min(openintel_cap,int(self.adns_data[7])) if self.adns_data[7] else 0\n\n def f_openintel_nb_days_seen_SOA(self):\n \"\"\"\n Type: numeric\n Record type SOA seen on this domain (from OpenIntel).\n Intuition: Benign sites may have certain 'rarer' record types.\n Source: Fraunhofer report\n \"\"\"\n if not self.adns_data:\n return None\n return min(openintel_cap,int(self.adns_data[14])) if self.adns_data[14] else 0\n\n ### Registration/WHOIS features ###\n\n def f_whois_available(self):\n return self.whois_data is not None\n\n def f_whois_registrar(self):\n \"\"\"\n Type: categorical\n The registrar used for the latest registration of the domain.\n Intuition: Malicious parties may prefer certain registrars e.g. due to low prices or few validity checks.\n Source: PREDATOR paper < Felegyhazi2010 + Hao2013\n :return:\n \"\"\"\n if not self._is_whois_available(\"registrar\"):\n return None\n if \"registrar_iana_id\" in self.whois_data and self.whois_data[\"registrar_iana_id\"]:\n return \"reg-{}\".format(self.whois_data[\"registrar_iana_id\"])\n else:\n return self.whois_data[\"registrar\"]\n\n def f_whois_registration_age(self):\n \"\"\"\n Type: numeric\n Length in days of the period between the date of registration and today. 
(~ Evaluation Scheme - 7)\n Intuition: Domains that have been registered a long time ago are more likely to be 'real' benign sites.\n Source: PREDENTIFIER\n :return:\n \"\"\"\n if not self._is_whois_available(\"created_date\"):\n return None\n try:\n return (self.snapshot_date - self._parse_whois_date(self.whois_data[\"created_date\"])).days\n except:\n return None\n\n def f_whois_registration_period(self):\n \"\"\"\n Type: numeric\n Length in days of the period for which a domain is registered. (~ Evaluation Scheme - 7)\n Intuition: Malicious domains will be registered for short periods (e.g. 1 year), while domains registered for\n a longer time are more likely to be benign.\n Source: PREDATOR\n\n Keep in mind (from \"WHOIS Lost In Translation\"):\n When a registrar does not renew or delete a domain before its expiration date, the registry automatically\n extends the registration by one year by moving the domain into the auto-renew state.\n :return:\n \"\"\"\n if (not self._is_whois_available(\"expired_date\")) or (not self._is_whois_available(\"created_date\")):\n return None\n try:\n return (self._parse_whois_date(self.whois_data[\"expired_date\"]) -\n self._parse_whois_date(self.whois_data[\"created_date\"])).days\n except:\n return None\n\n def f_whois_has_been_renewed(self):\n \"\"\"\n Type: categorical (true|false)\n Indicates whether a domain has been renewed.\n Intuition: Malicious domains are short-lived and therefore unlikely to be renewed.\n :return:\n \"\"\"\n return self.renewal_data[1] if self.renewal_data else None\n\n def f_whois_privacy(self):\n \"\"\"\n Type: categorical (true|false)\n The WHOIS privacy used for the domain, or None if no privacy service is used.\n Intuition: abusive domains tend to use Privacy and Proxy services\n (but using a WHOIS Privacy and Proxy is not a reliable indicator of malicious activity)\n ~ not using privacy/proxy -> rather benign; using it -> unknown\n Source: Cybercrime gTLDs Korczynski\n :return:\n \"\"\"\n for property in [\"reg_org\", \"reg_name\", \"reg_street\", \"reg_city\", \"reg_state\", \"reg_postal\", \"reg_country\", \"reg_email\", \"reg_phone\", \"reg_fax\", \"reg_id\"]:\n if self._is_whois_available(property):\n value = self.whois_data[property]\n for keyword in [\"privacy\", \"private\", \"proxy\", \"protect\", \"redacted\"]: # actively using privacy service\n if keyword in value.lower():\n return True\n return None\n\n def f_whois_temporary_mail(self):\n \"\"\"\n Type: categorical (true|false)\n The mail address used to register the domain belongs to a temporary mail service.\n Uses the data collected by `disposable_email_service.py`\n Intuition: malicious actors may not want to bother setting up 'real' mail addresses, and therefore resort to\n temporary mail services.\n :return:\n \"\"\"\n if not self._is_whois_available(\"reg_email\"):\n return None\n if \"@\" in self.whois_data[\"reg_email\"]:\n email_parts = self.whois_data[\"reg_email\"].split(\"@\")\n if len(email_parts) == 2:\n tempmail_data = auxiliary_data[self.formatted_snapshot_date][\"tempmail_data\"]\n return (email_parts[1].lower() in tempmail_data[0]) or any(d.endswith(email_parts[1].lower()) for d in tempmail_data[1])\n # domain in exact domains / wildcard domains\n else: # invalid email address / not checked\n return None\n else:\n return None\n\n def f_whois_valid_phone(self):\n \"\"\"\n Type: categorical\n 0 if the phone number provided in WHOIS is valid, 1 if invalid, 2 if not present.\n :return:\n \"\"\"\n if not self.whois_validity_data:\n return 
None\n status = self.whois_validity_data[3]\n return True if status == \"VALID\" else (False if status == \"INVALID\" else None)\n\n\n ### Top websites lists features (~ Evaluation Scheme - 1) ###\n\n def f_topsites_alexa_presence(self):\n \"\"\"\n Type: numeric\n Number of days when the domain appeared in Alexa's top websites list.\n Intuition: Presence over a long period suggests actual popularity and benignness.\n Source: ~ Lison2017BIGDATA\n :return:\n \"\"\"\n if not self.topsites_data[\"alexa\"]:\n return None\n return self.topsites_data[\"alexa\"][0]\n\n def f_topsites_alexa_average_rank(self):\n \"\"\"\n Type: numeric\n Average rank of the domain for all appearances in Alexa's top websites list.\n Intuition: Better ranks suggest actual popularity and benignness.\n Source: Lison2017BIGDATA\n :return:\n \"\"\"\n if not self.topsites_data[\"alexa\"]:\n return None\n average_rank = round(self.topsites_data[\"alexa\"][1]/self.topsites_data[\"alexa\"][0] if self.topsites_data[\"alexa\"][1] else 0)\n return average_rank if average_rank > 0 else None\n\n def f_topsites_umbrella_presence(self):\n \"\"\"\n Type: numeric\n Number of days when the domain appeared in Umbrella's top websites list.\n Intuition: Presence over a long period suggests actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"umbrella\"]:\n return None\n return self.topsites_data[\"umbrella\"][0]\n\n def f_topsites_umbrella_average_rank(self):\n \"\"\"\n Type: numeric\n Average rank of the domain for all appearances in Umbrella's top websites list.\n Intuition: Better ranks suggest actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"umbrella\"]:\n return None\n average_rank = round(self.topsites_data[\"umbrella\"][1]/self.topsites_data[\"umbrella\"][0] if self.topsites_data[\"umbrella\"][1] else 0)\n return average_rank if average_rank > 0 else None\n\n def f_topsites_majestic_presence(self):\n \"\"\"\n Type: numeric\n Number of days when the domain appeared in Majestic's top websites list.\n Intuition: Presence over a long period suggests actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"majestic\"]:\n return None\n return self.topsites_data[\"majestic\"][0]\n\n def f_topsites_majestic_average_rank(self):\n \"\"\"\n Type: numeric\n Average rank of the domain for all appearances in Majestic's top websites list.\n Intuition: Better ranks suggest actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"majestic\"]:\n return None\n average_rank = round(self.topsites_data[\"majestic\"][1]/self.topsites_data[\"majestic\"][0] if self.topsites_data[\"majestic\"][1] else 0)\n return average_rank if average_rank > 0 else None\n\n def f_topsites_quantcast_presence(self):\n \"\"\"\n Type: numeric\n Number of days when the domain appeared in Quantcast's top websites list.\n Intuition: Presence over a long period suggests actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"quantcast\"]:\n return None\n return self.topsites_data[\"quantcast\"][0]\n\n def f_topsites_quantcast_average_rank(self):\n \"\"\"\n Type: numeric\n Average rank of the domain for all appearances in Quantcast's top websites list.\n Intuition: Better ranks suggest actual popularity and benignness.\n :return:\n \"\"\"\n if not self.topsites_data[\"quantcast\"]:\n return None\n average_rank = round(self.topsites_data[\"quantcast\"][1]/self.topsites_data[\"quantcast\"][0] if self.topsites_data[\"quantcast\"][1] else 
0)\n return average_rank if average_rank > 0 else None\n\n ### Content-based features ###\n\n def f_search_pages_found_wayback_machine(self):\n \"\"\"\n Type: numeric\n Number of scraped pages on the Wayback Machine.\n Intuition: many pages & found/crawled by search engine -> more likely to be real content\n <-> malicious: don't bother setting up a real website / not found\n :return:\n \"\"\"\n if self.domain not in auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"]:\n return None\n return auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"][self.domain][1]\n\n def f_search_wayback_machine_first_seen_before_now(self):\n \"\"\"\n Type: numeric\n Difference between the snapshot date and when the site was first seen on the Wayback Machine.\n Intuition: existing for longer time -> more likely to be benign\n :return:\n \"\"\"\n if self.domain not in auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"]:\n return None\n wayback_timestamp = auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"][self.domain][2]\n if wayback_timestamp == \"-1\":\n return None\n return (self.snapshot_date - datetime.datetime.strptime(wayback_timestamp, \"%Y%m%d%H%M%S\")).seconds\n\n def f_search_wayback_machine_first_seen_before_validity(self):\n \"\"\"\n Type: numeric\n Difference between the validity start date and when the site was first seen on the Wayback Machine.\n Intuition: existing for longer time -> more likely to be benign\n :return:\n \"\"\"\n if self.domain not in auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"]:\n return None\n wayback_timestamp = auxiliary_data[self.formatted_snapshot_date][\"wayback_domain_data\"][self.domain][2]\n if wayback_timestamp == \"-1\":\n return None\n return (dateparser.parse(self.malware_data[2]) - datetime.datetime.strptime(wayback_timestamp, \"%Y%m%d%H%M%S\")).seconds\n\n ### Certificate transparency logs ###\n\n def f_ct_has_certificate(self):\n \"\"\"\n Type: binary\n The domain had a certificate.\n Intuition: Acquiring a certificate requires (setup) effort, indicating benignness.\n :return:\n \"\"\"\n d = auxiliary_data[self.formatted_snapshot_date][\"ct_data\"].get(self.domain, None)\n if d:\n return d[0] == \"True\"\n else:\n return None\n\n ### Helper methods ###\n\n def _is_whois_available(self, field):\n return self.whois_data and field in self.whois_data and self.whois_data[field]\n\n def _parse_whois_date(self, whois_date):\n try:\n return dateparser.parse(whois_date.strip(\"[]':\")).replace(tzinfo=None)\n except:\n return None" }, { "alpha_fraction": 0.6277115941047668, "alphanum_fraction": 0.6513901352882385, "avg_line_length": 48.96183395385742, "blob_id": "28c24723663319fbdb5d971981c005b815c3ac3f", "content_id": "a3a9c26c8a487659dd0d6da388267597815535c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6546, "license_type": "no_license", "max_line_length": 128, "num_lines": 131, "path": "/evaluation_code_and_models/ensemble_2019_evaluation.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport argparse\nimport json\n\nimport matplotlib.ticker\nimport matplotlib.patches as patches\nimport pandas as pd\nimport numpy as np\nimport utils\n\nfrom sklearn.preprocessing import StandardScaler, Binarizer, LabelEncoder, LabelBinarizer, OneHotEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom 
sklearn.impute import SimpleImputer\nfrom joblib import load\nimport itertools\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score, accuracy_score\nfrom evaluation.metrics import workReducedPostDetermineThrOneGoBis\nimport evaluation.preanalysis as prean\n\nimport dataprocessing.preprocessing as pre\nimport macroify\n\nif __name__ == \"__main__\":\n results = {}\n\n features_2017, labelzz_2017, _ = pre.loadAndCleanDataMaxDom('1111', False, '2017', whoisdatacompl=True)\n features_2018, labelzz_2018, _ = pre.loadAndCleanDataMaxDom('1111', False, '2018', whoisdatacompl=True)\n\n x_train = pd.concat([features_2017, features_2018])\n y_train = np.concatenate([labelzz_2017, labelzz_2018])\n\n prean.covMatrix(x_train,y_train,'extended/')\n available, reputation, dns, whois, openintel, label = pre.loadAndCleanDataPerDataSet(False, '2019', whoisdatacompl=False)\n \n total_fn = 0\n total_manual = 0\n total_pred = 0\n total_amount_of_domains = len(available.index)\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_positive = labelzsss.sum()\n total_amount_negative = len(labelzsss.index) - labelzsss.sum()\n l = [False,True]\n dfs = []\n codesz = []\n ensemble_scores_pos = []\n ensemble_scores_neg = []\n ensemble_predictions = []\n ensemble_predictions_priori = []\n ensemble_labels_priori = []\n ensemble_labels = []\n metrics = { 'f1': [], 'precision': [], 'recall': [], 'acc_train': [], 'acc_test': [], 'eer': [], 'fnr_work_reduced': [],\n 'fpr_work_reduced': [], 'work_reduced': [], 'work_reduced_negative': [], 'work_reduced_positive': []}\n for x in itertools.product(l,repeat=4):\n code = ''.join(['1' if i else '0' for i in x])\n if code != '0000': # code[0] != '0'\n features_2017, labelzz_2017, _ = pre.loadAndCleanDataMaxDom(code, False, '2017', whoisdatacompl=False)\n features_2018, labelzz_2018, _ = pre.loadAndCleanDataMaxDom(code, False, '2018', whoisdatacompl=False)\n\n x_train = pd.concat([features_2017, features_2018])\n y_train = np.concatenate([labelzz_2017, labelzz_2018])\n y_train_category = np.concatenate([labelzz_2017, labelzz_2018*2])\n\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=42)\n clf.fit(x_train, y_train)\n\n # Construct domains that should be classified by this model\n features, labelzz = pre.loadAndCleanDataExactPattern(x, available, reputation, dns, whois, openintel, label)\n amount_of_domains = len(features.index)\n codesz.append(code)\n print(amount_of_domains, 'domains to classify for sourcepattern', code)\n if len(labelzz.index != 0):\n print(features.columns)\n print('With', amount_of_domains-labelzz.sum(), 'negative domains and', labelzz.sum(), 'positive domains')\n scores = clf.predict_proba(features)\n predictions = clf.predict(features)\n df = pd.DataFrame(list(zip(predictions, scores[:,1], len(predictions)*[code])),\n index=features.index, columns=['classification 0=benign, 1=malicious', 'score', 'model code'])\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, _, _ = workReducedPostDetermineThrOneGoBis(x_train, y_train, code, scores,\n labelzz, y_train_category, [0.02])\n total_fn += sum(negative_pred)\n total_manual += len(no_action_pred)\n 
total_pred += (len(positive_pred) + len(negative_pred))\n\n ensemble_predictions = ensemble_predictions + [1]*len(positive_pred) + [0]*len(negative_pred) + no_action_pred\n ensemble_labels = ensemble_labels + positive_pred + negative_pred + no_action_pred\n\n ensemble_predictions_priori = ensemble_predictions_priori + predictions.tolist()\n ensemble_labels_priori = ensemble_labels_priori + labelzz.values.tolist()\n\n dfs.append(df)\n\n ensemble_scores_neg = ensemble_scores_neg + scores[:, 1][labelzz == 0].tolist()\n ensemble_scores_pos = ensemble_scores_pos + scores[:, 1][labelzz == 1].tolist()\n\n print('Makes a prediction for', (len(positive_pred) + len(negative_pred)), 'domains')\n print('Would predict', np.sum(predictions), 'domains malicious')\n\n # Save predictions\n df = pd.concat(dfs)\n print(len(df.index),\" predictions made\")\n df.to_csv('dfs/2019/predictions.csv')\n\n print('Total work reduced', (total_amount_of_domains-total_manual - total_amount_of_domains*0.15)/total_amount_of_domains)\n print('Total FNR', total_fn/total_amount_positive)\n print('Total FPR', total_fp/total_amount_negative)\n\n print('Accuracy', accuracy_score(ensemble_labels, ensemble_predictions))\n print('F1', f1_score(ensemble_labels, ensemble_predictions))\n print('Precision', precision_score(ensemble_labels, ensemble_predictions))\n print('Recall', recall_score(ensemble_labels, ensemble_predictions))\n\n print('Little check', total_amount_positive+total_amount_negative == total_amount_of_domains)\n print('Little check', total_pred+total_manual == total_amount_of_domains)\n\n results['Cworkreducedextended'] = (total_amount_of_domains-total_manual)/total_amount_of_domains *100\n results['Cworkreduced'] = (total_amount_of_domains-total_manual - total_amount_of_domains*0.15)/total_amount_of_domains*100\n\n macroify.append_file(results)\n\n print('Little check 2', len(ensemble_scores_neg) + len(ensemble_scores_pos) == total_amount_of_domains)\n\n" }, { "alpha_fraction": 0.6357675790786743, "alphanum_fraction": 0.6463586688041687, "avg_line_length": 54.07777786254883, "blob_id": "2226ad9e0ff48fbac3bac9866998b9f970a3f1d6", "content_id": "0e704ca365aa72d752e54b9a351fe86432910b3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9914, "license_type": "no_license", "max_line_length": 166, "num_lines": 180, "path": "/evaluation_code_and_models/dataset_impact_evaluation_extended.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport argparse\nimport json\n\nimport matplotlib.ticker\nimport matplotlib.patches as patches\nimport pandas as pd\nimport numpy as np\nimport utils\n\nfrom sklearn.preprocessing import StandardScaler, Binarizer, LabelEncoder, LabelBinarizer, OneHotEncoder\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.impute import SimpleImputer\nfrom joblib import load\nimport itertools\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score, accuracy_score\nfrom evaluation.metrics import workReducedPostLoadThr\n\nimport dataprocessing.preprocessing as pre\nimport macroify\n\nimport bob.measure\n\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nif __name__ == \"__main__\":\n # 'Do the avalanche experiments, dataset impact evaluation of the extended model'\n results_posteriori = {'work_reduction_metric':[], 'fnr_metric': [], 'fpr_metric': [], 'accuracy_metric': [], 
'f1_metric': [],\n 'precision_metric': [],'recall_metric': [], 'eer_metric':[]}\n results_priori = {'work_reduction_metric': [], 'fnr_metric': [], 'fpr_metric': [], 'accuracy_metric': [], 'f1_metric': [],\n 'precision_metric': [], 'recall_metric': [], 'eer_metric': []}\n missing_column = []\n\n for to_drop in ['None','reputation_available', 'dnsdb_available', 'whois_available', 'openintel_available']:\n available, reputation, dns, whois, openintel, label = pre.loadAndCleanDataPerDataSet(False, '2018')\n available['reputation_available'] = [True] * len(available.index)\n\n # real amount of labels with extra_train\n total_amount_of_2018_domains = len(available.index)\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_2018_positive = labelzsss.sum()\n total_amount_2018_negative = len(labelzsss.index) - labelzsss.sum()\n\n # dropping train labels\n ind_extra_train = load('models/' + 'extended' + '/additionaltrainindices.joblib')\n manual_added_to_trainingset = len(ind_extra_train)\n available = available.drop(ind_extra_train)\n reputation = reputation.drop(ind_extra_train)\n dns = dns.drop(ind_extra_train)\n whois = whois.drop(ind_extra_train)\n openintel = openintel.drop(ind_extra_train)\n label = label.drop(ind_extra_train)\n\n # amount of evaluation data\n total_amount_of_domains = len(available.index)\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_positive = labelzsss.sum()\n total_amount_negative = len(labelzsss.index) - labelzsss.sum()\n\n if to_drop == 'activeandpassive':\n available['dnsdb_available'] = [False] * len(available.index)\n available['openintel_available'] = [False] * len(available.index)\n elif not to_drop == 'None':\n available[to_drop] = [False]*len(available.index)\n\n # keeping track of results\n total_fp = 0\n total_fn = 0\n total_manual = 0\n total_pred = 0\n total_amount_of_domains = len(available.index)\n\n dfs = []\n codesz = []\n ensemble_predictions = []\n ensemble_labels = []\n ensemble_scores_pos = []\n ensemble_scores_neg = []\n ensemble_predictions_priori = []\n ensemble_labels_priori = []\n\n metrics = { 'f1': [], 'precision': [], 'recall': [], 'acc_train': [], 'acc_test': [], 'eer': [], 'fnr_work_reduced': [],\n 'fpr_work_reduced': [], 'work_reduced': [], 'work_reduced_negative': [], 'work_reduced_positive': []}\n\n l = [False, True]\n for x in itertools.product(l,repeat=4):\n code = ''.join(['1' if i else '0' for i in x])\n features, labelzz = pre.loadAndCleanDataExactPatternAlt(x, available, reputation, dns, whois, openintel,\n label)\n amount_of_domains = len(features.index)\n print(amount_of_domains, 'domains to classify for sourcepattern', code)\n if code != '0000': # code[0] != '0'\n clf = load('models/' + 'extended' + '/model' + code + '.joblib')\n\n # Construct domains that should be classified by this model\n\n if len(labelzz.index != 0):\n print('With', amount_of_domains-labelzz.sum(), 'negative domains and', labelzz.sum(), 'positive domains')\n\n index = features.index\n scores = clf.predict_proba(features)\n predictions = clf.predict(features)\n df = pd.DataFrame(list(zip(predictions, scores[:,1], len(predictions)*[code])),\n index=features.index, columns=['classification 0=benign, 1=malicious', 'score', 'model code'])\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, _, _ = workReducedPostLoadThr('extended', code, scores, labelzz)\n\n total_fp += 
(len(positive_pred) - sum(positive_pred))\n total_fn += sum(negative_pred)\n total_manual += len(no_action_pred)\n total_pred += (len(positive_pred) + len(negative_pred))\n\n ensemble_predictions = ensemble_predictions + [1]*len(positive_pred) + [0]*len(negative_pred) + no_action_pred\n ensemble_labels = ensemble_labels + positive_pred + negative_pred + no_action_pred\n\n ensemble_predictions_priori = ensemble_predictions_priori + predictions.tolist()\n ensemble_labels_priori = ensemble_labels_priori + labelzz.values.tolist()\n\n ensemble_scores_neg = ensemble_scores_neg + scores[:, 1][labelzz == 0].tolist()\n ensemble_scores_pos = ensemble_scores_pos + scores[:, 1][labelzz == 1].tolist()\n\n print('Makes a prediction for', (len(positive_pred) + len(negative_pred)), 'domains')\n print('Would predict', np.sum(predictions), 'domains malicious')\n else:\n total_manual += len(labelzz.index)\n ensemble_predictions = ensemble_predictions + labelzz.values.tolist()\n ensemble_labels = ensemble_labels + labelzz.values.tolist()\n\n print('Total work reduced', (total_amount_of_domains-total_manual)/total_amount_of_domains)\n print('Total FNR', total_fp/total_amount_negative)\n print('Total FPR', total_fn/total_amount_positive)\n print('Total work reduced real', (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains)\n print('Total FNR real', total_fn / total_amount_2018_positive)\n print('Total FPR real', total_fp / total_amount_2018_negative)\n\n print('Accuracy', accuracy_score(ensemble_labels, ensemble_predictions))\n print('F1', f1_score(ensemble_labels, ensemble_predictions))\n print('Precision', precision_score(ensemble_labels, ensemble_predictions))\n print('Recall', recall_score(ensemble_labels, ensemble_predictions))\n\n print('Little check', total_amount_positive+total_amount_negative == total_amount_of_domains)\n print('Little check', total_pred+total_manual == total_amount_of_domains)\n print('Little check', len(ensemble_scores_pos) + len(ensemble_scores_neg) == total_amount_of_domains)\n print('Little check', len(ensemble_scores_pos) == total_amount_positive)\n print('Little check', len(ensemble_scores_neg) == total_amount_negative)\n print('Little check', total_amount_of_domains + manual_added_to_trainingset == total_amount_of_2018_domains)\n\n results_posteriori['work_reduction_metric'].append((total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains)\n results_posteriori['fnr_metric'].append(total_fn / total_amount_2018_positive)\n results_posteriori['fpr_metric'].append(total_fp / total_amount_2018_negative)\n results_posteriori['accuracy_metric'].append(accuracy_score(ensemble_labels, ensemble_predictions))\n results_posteriori['f1_metric'].append(f1_score(ensemble_labels, ensemble_predictions))\n results_posteriori['precision_metric'].append(precision_score(ensemble_labels, ensemble_predictions))\n results_posteriori['recall_metric'].append(recall_score(ensemble_labels, ensemble_predictions))\n\n results_posteriori['eer_metric'].append(bob.measure.eer(ensemble_scores_neg,ensemble_scores_pos))\n results_priori['eer_metric'].append(bob.measure.eer(ensemble_scores_neg,ensemble_scores_pos))\n\n results_priori['work_reduction_metric'].append((total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains)\n results_priori['fnr_metric'].append(total_fn / total_amount_2018_positive)\n results_priori['fpr_metric'].append(total_fp / 
total_amount_2018_negative)\n results_priori['accuracy_metric'].append(accuracy_score(ensemble_labels_priori, ensemble_predictions_priori))\n results_priori['f1_metric'].append(f1_score(ensemble_labels_priori, ensemble_predictions_priori))\n results_priori['precision_metric'].append(precision_score(ensemble_labels_priori, ensemble_predictions_priori))\n results_priori['recall_metric'].append(recall_score(ensemble_labels_priori, ensemble_predictions_priori))\n\n missing_column.append(to_drop)\n\n df = pd.DataFrame(results_posteriori, index=missing_column)\n df.to_csv('dfs/' + 'extended' + '/dataset_impact_posteriori.csv')\n\n df = pd.DataFrame(results_priori, index=missing_column)\n df.to_csv('dfs/' + 'extended' + '/dataset_impact_priori.csv')\n" }, { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.4481792747974396, "avg_line_length": 18.88888931274414, "blob_id": "690739068123f1986e58e276e899cbc418ff10f1", "content_id": "1c3e81a354346f324db90585b1bc625354c1c8fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 357, "license_type": "no_license", "max_line_length": 30, "num_lines": 18, "path": "/evaluation_code_and_models/utils.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "def translatecode(code):\n out = ''\n for char in code:\n if char == '1':\n out += 'Y'\n else:\n out += 'N'\n return out\n\ndef translateyear(year):\n out = 'somethingwentworng'\n if year == '2017':\n out = 'A'\n elif year == '2018':\n out = 'B'\n elif year == '2019':\n out = 'C'\n return out" }, { "alpha_fraction": 0.5887522101402283, "alphanum_fraction": 0.5992969870567322, "avg_line_length": 29, "blob_id": "cdfbece2a42e3c5d9e6f7151e4025bafc654cd22", "content_id": "e341d6f1db94dae227cebabda16fa514be5aa578", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 76, "num_lines": 19, "path": "/evaluation_code_and_models/macroify.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "def macroify(dct):\n result_string = \"\"\n for k, v in dct.items():\n result_string += \"{}\\n\".format(macroify_single(k, v))\n return result_string\n\ndef macroify_single(key, value):\n return \"\\\\newcommand{{\\\\{key}}}{{{value}}}\".format(key=key, value=value)\n\ndef append_file(dct):\n with open('latexvariables.txt','a') as myfile:\n myfile.write(macroify(dct))\n\ndef new_file(dct):\n with open('latexvariables.txt', 'w+') as myfile:\n myfile.write(macroify(dct))\n\nif __name__ == '__main__':\n print(macroify({\"a\": 123, \"b\": 456, \"c\": \"ABC\"}))" }, { "alpha_fraction": 0.562603235244751, "alphanum_fraction": 0.579561710357666, "avg_line_length": 44.62311553955078, "blob_id": "03b1f6f8d8653b9d5a5544e2091c35138daf805b", "content_id": "2db1689aa5e447cbfb2cf63adb67930841adff4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9081, "license_type": "no_license", "max_line_length": 161, "num_lines": 199, "path": "/evaluation_code_and_models/evaluation/postanalysis.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport datetime\n\nfrom sklearn.metrics import confusion_matrix\nimport macroify\n\ndef saveFpFnDf(x_test,y_test,y_pred,columnnames,domainname, path):\n ''' Save the domains that were falsy classified. 
FP and FN are saved in sperate files\n :param x_test: features of the data points\n :param y_test: real labels\n :param y_pred: predicted labels\n :param columnnames: names of the features\n :param domainname: list of domainnnames\n :param path: path to save to\n '''\n df = pd.DataFrame(x_test, columns=columnnames, index = domainname)\n fpdf = df[np.logical_and(y_test != y_pred, y_pred == 1)]\n fndf = df[np.logical_and(y_test != y_pred, y_pred == 0)]\n fpdf.to_csv('dfs/' + path + 'falsepositives.csv')\n fndf.to_csv('dfs/' + path + 'falsenegatives.csv')\n\ndef saveFpFnDfBis(df, labels,labels_pred, path):\n ''' Save the domains that were falsy classified. FP and FN are saved in sperate files\n :param x_test: features of the data points\n :param y_test: real labels\n :param y_pred: predicted labels\n :param columnnames: names of the features\n :param domainname: list of domainnnames\n :param path: path to save to\n '''\n y_test = labels.values\n y_pred = labels_pred\n print(y_test)\n print(y_pred)\n print(y_test != y_pred)\n fpdf = df[np.logical_and(y_test != y_pred, y_pred == 1)]\n fndf = df[np.logical_and(y_test != y_pred, y_pred == 0)]\n fpdf.to_csv('dfs/' + path + 'falsepositives.csv')\n fndf.to_csv('dfs/' + path + 'falsenegatives.csv')\n\ndef saveimportance(importances, featurenames, path):\n ''' Save the feature importances\n :param importances: the importance scores\n :param featurenames: the name of the features\n :param path: path to save to\n '''\n df = pd.DataFrame({'featurename':featurenames, 'score':importances})\n df = df.sort_values('score',ascending=False)\n df.to_csv('dfs/' + path + 'importance.csv')\n\ndef featureDistribution(columnnames, estimators, distributions):\n for estimator in estimators:\n estimator = estimator[0]\n for fid,thr in zip(estimator.tree_.feature, estimator.tree_.threshold):\n if fid >= 0:\n distributions[columnnames[fid]].append(thr)\n\n\ndef understandDecisionTree(estimator):\n n_nodes = estimator.tree_.node_count\n children_left = estimator.tree_.children_left\n children_right = estimator.tree_.children_right\n feature = estimator.tree_.feature\n threshold = estimator.tree_.threshold\n\n node_depth = np.zeros(shape=n_nodes, dtype=np.int64)\n is_leaves = np.zeros(shape=n_nodes, dtype=bool)\n stack = [(0, -1)] # seed is the root node id and its parent depth\n while len(stack) > 0:\n node_id, parent_depth = stack.pop()\n node_depth[node_id] = parent_depth + 1\n\n # If we have a test node\n if (children_left[node_id] != children_right[node_id]):\n stack.append((children_left[node_id], parent_depth + 1))\n stack.append((children_right[node_id], parent_depth + 1))\n else:\n is_leaves[node_id] = True\n\n print(\"The binary tree structure has %s nodes and has \"\n \"the following tree structure:\"\n % n_nodes)\n for i in range(n_nodes):\n if is_leaves[i]:\n print(\"%snode=%s leaf node.\" % (node_depth[i] * \"\\t\", i))\n else:\n print(\"%snode=%s test node: go to node %s if X[:, %s] <= %s else to \"\n \"node %s.\"\n % (node_depth[i] * \"\\t\",\n i,\n children_left[i],\n feature[i],\n threshold[i],\n children_right[i],\n ))\n\ndef print_performance_per_malware_family(y_test, y_pred, y_post, column_malware_family = 0, print_to_tex=False):\n '''\n Analyse the performance for each malware family\n :param y_test: real labels\n :param y_pred: predicted labels\n :param y_post: the accounting information for each data point, i.e. 
malware family\n :param column_malware_family: where the malware family column is located\n :param print_to_tex:\n :return:\n '''\n malware_families = set(y_post[:,column_malware_family])\n s = 0\n\n if print_to_tex:\n print(\"Family & \\# samples & Acc. & Prec. & Rec. & FNR & FPR \\\\\\\\\")\n for family in malware_families:\n y_test_family = y_test[y_post[:,column_malware_family] == family]\n y_pred_family = y_pred[y_post[:,column_malware_family] == family]\n try:\n tn, fp, fn, tp = confusion_matrix(y_test_family, y_pred_family).ravel()\n print('{} got {} tp, {} fp, {} tn, {} fn, {:.2f} accuracy, {:.2f} fnr, {:.2f} fpr'.format(\\\n family, tp, fp, tn, fn, (tp + tn) / (tp + tn + fp + fn), fn / (fn + tp), fp / (fp+tn)))\n if tp + fp + tn + fn > 50:\n if print_to_tex:\n print('{} & {} & {:.1f}\\\\% & {:.1f}\\\\% & {:.1f}\\\\% & {:.1f}\\\\% & {:.1f}\\\\% \\\\\\\\'.format(\n family, tp + fp + tn + fn, 100*(tp + tn) / (tp + tn + fp + fn), 100*tp/(tp+fp), 100*tp/(tp+fn), 100*fn / (fn + tp) , 100*fp / (fp+tn)))\n else:\n print('{} got {} samples, {:.2f} accuracy, {:.2f} fnr, {:.2f} fpr, {:.2f} precision, {:.2f} recall'.format(\\\n family, tp + fp + tn + fn, (tp + tn) / (tp + tn + fp + fn), fn / (fn + tp), fp / (fp+tn), tp/(tp+fp), tp/(tp+fn)))\n s = s + tn + fp + fn + tp\n except ValueError:\n print('family {} got no result'.format(family))\n\n print('Total amount of domains ' + str(s))\n\ndef print_performance_per_malware_validity_timestamp(y_test, y_pred, y_post, column_timestamp=-1):\n '''\n Print the performance per malware validity timestamp\n :param y_test:\n :param y_pred:\n :param y_post:\n :param column_timestamp:\n :return:\n '''\n timestamps = [datetime.datetime(2017, 11, 30, 0, 0, 0)] + \\\n [datetime.datetime(2017, month, 1, 0, 0, 0) for month in range(12, 12+1)] + \\\n [datetime.datetime(2018, month, 1, 0, 0, 0) for month in range(1, 12 + 1)] + \\\n [datetime.datetime(2019, 1, 1, 0, 0, 0)] + \\\n [datetime.datetime(2049, 1, 1, 0, 0, 0)]\n s = 0\n plot_data = []\n\n for timestamp_idx in range(len(timestamps) - 1):\n y_test_family = y_test[(timestamps[timestamp_idx] <= y_post[:, column_timestamp]) & (y_post[:, column_timestamp] < timestamps[timestamp_idx+1])]\n y_pred_family = y_pred[(timestamps[timestamp_idx] <= y_post[:, column_timestamp]) & (y_post[:, column_timestamp] < timestamps[timestamp_idx+1])]\n try:\n tn, fp, fn, tp = confusion_matrix(y_test_family, y_pred_family).ravel()\n print('{} got {} tp, {} fp, {} tn, {} fn, {:.2f} accuracy, {:.2f} fnr, {:.2f} fpr'.format(\n timestamps[timestamp_idx].month, tp, fp, tn, fn, (tp + tn) / (tp + tn + fp + fn), fn / (fn + tp), fp / (fp + tn)))\n plot_data.append((\"{}-{}\".format(timestamps[timestamp_idx].month, timestamps[timestamp_idx].year),\n tp, fp, tn, fn, (tp + tn) / (tp + tn + fp + fn), fn / (fn + tp), fp / (fp + tn)))\n s = s + tn + fp + fn + tp\n except ValueError:\n print('got no result')\n import matplotlib.pyplot as plt\n labels = \"tp fp tn fn accuracy fnr fpr\".split(\" \")\n plt.plot([d[0] for d in plot_data], [sum(d[1:5]) for d in plot_data] )\n plt.plot([d[0] for d in plot_data], [sum(d[1:3]) for d in plot_data] )\n plt.plot([d[0] for d in plot_data], [sum(d[3:5]) for d in plot_data])\n\n plt.show()\n for i in range(4,len(labels)):\n plt.plot([d[0] for d in plot_data], [d[i+1] for d in plot_data],\n label=labels[i])\n plt.show()\n\ndef workReducedPostDomains(trainyear, code, scores):\n '''returns the actual domains'''\n thresholds = pd.read_csv('dfs/' + trainyear + '/' + code + '_workreduced.csv', index_col=0).loc[:,\n 
['thresholds_fnr', 'thresholds_fpr']]\n upper = thresholds.iloc[3, 1]\n lower = thresholds.iloc[3, 0]\n\n negative_pred_ind = [ s < lower for s in scores[:,1]]\n no_action_pred_ind = [ (s >= lower) and (s <= upper) for s in scores[:,1]]\n positive_pred_ind = [ s > upper for s in scores[:,1]]\n\n return negative_pred_ind, no_action_pred_ind, positive_pred_ind\n\ndef thresholdsToLatex(path='dfs/2017/1111_workreduced.csv'):\n df = pd.read_csv(path,index_col=0)\n results = {}\n results['WorkReducedLowerBound'] = df.loc[:,'fnr'].iloc[-1]\n results['WorkReducedUpperBound'] = 100-df.loc[:,'fpr'].iloc[-1]\n results['WorkReducedHundredMinusUpperBound'] = df.loc[:,'fpr'].iloc[-1]\n results['WorkReducedTwoPercent'] = df.loc[:,'sum'].iloc[-1]\n results['WorkReducedHundredMinusTwoPercent'] = 100 - df.loc[:,'sum'].iloc[-1]\n # results['WorkReducedOnePercent'] = df.iloc[2,2]\n # results['WorkReducedPointFivePercent'] = df.iloc[1,2]\n # results['WorkReducedPointOnePercent'] = df.iloc[0,2]\n\n macroify.append_file(results)\n\n\n" }, { "alpha_fraction": 0.6864525675773621, "alphanum_fraction": 0.6879505515098572, "avg_line_length": 54.05670166015625, "blob_id": "18c57983a13136204a76b57d8e9a8c2cec5dda3a", "content_id": "dfa5ec71b02689b9dec1693d103e67b892e60445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10681, "license_type": "no_license", "max_line_length": 133, "num_lines": 194, "path": "/feature_generation/retrieve_sinkhole_data.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import csv\nimport os\nimport time\nimport urllib.request\nfrom glob import glob\nimport socket\n\nimport requests\n\nfrom credentials import SINKDB_HTTP_API_KEY\n\n\ndef download_sinkholes_stamparm(formatted_snapshot_date):\n \"\"\"\n ns = document.querySelectorAll(\".js-navigation-open\");\n results = [];\n for (i = 0; i < ns.length; i++) {\n let n = ns[i].text;\n if (n.startsWith(\"sinkhole_\")) {\n results.push(\"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/\" + n)\n }\n };\n console.log(results);\n\n @ https://github.com/stamparm/maltrail/tree/master/trails/static/malware\n \"\"\"\n urls = [\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_abuse.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_anubis.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_arbor.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_bitdefender.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_blacklab.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_botnethunter.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_certgovau.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_certpl.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_checkpoint.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_cirtdk.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_collector.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_conficker.txt\",\n 
\"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_cryptolocker.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_drweb.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_dynadot.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_dyre.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_farsight.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_fbizeus.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_fitsec.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_fnord.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_fraunhofer.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_gameoverzeus.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_georgiatech.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_gladtech.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_honeybot.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_hyas.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_kaspersky.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_kryptoslogic.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_microsoft.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_noip.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_rsa.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_secureworks.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_shadowserver.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_sidnlabs.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_sinkdns.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_sofacy.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_sugarbucket.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_supportintel.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_switch.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_tech.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_tsway.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_unknown.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_virustracker.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_wapacklabs.txt\",\n 
\"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_xaayda.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_yourtrap.txt\",\n \"https://raw.githubusercontent.com/stamparm/maltrail/master/trails/static/malware/sinkhole_zinkhole.txt\"\n ]\n for url in urls:\n urllib.request.urlretrieve(url, \"input_data/{}/stamparm_sinkhole/{}\".format(formatted_snapshot_date, url.split(\"/\")[-1]))\n\n\ndef parse_sinkholes_stamparm(formatted_snapshot_date):\n sinkholes_ip = set()\n sinkholes_ip_with_source = set()\n sinkholes_ns = set()\n sinkholes_ns_with_source = set()\n for fp in glob(os.path.join(os.path.dirname(__file__), \"input_data/{}/stamparm_sinkhole/*.txt\".format(formatted_snapshot_date))):\n source = fp[:-4].split(\"_\")[-1]\n with open(fp) as f:\n for line in f:\n line = line.rstrip()\n if line and not line.startswith(\"#\"):\n try:\n socket.inet_aton(line)\n # is an IP address\n sinkholes_ip.add(line)\n sinkholes_ip_with_source.add((line, source))\n except socket.error:\n # is not an IP address\n sinkholes_ns.add(line)\n sinkholes_ns_with_source.add((line, source))\n return sinkholes_ip, sinkholes_ns, sinkholes_ip_with_source, sinkholes_ns_with_source\n\n\ndef parse_sinkholes_alowaisheq_ns():\n sinkholes_ns = set()\n with open(os.path.join(os.path.dirname(__file__), \"alowaisheq_sinkholes_ns.txt\")) as f:\n for line in f:\n line = line.rstrip()\n if line:\n sinkholes_ns.add(line)\n return sinkholes_ns\n\n\ndef load_sinkdb_cache(record, folder):\n if not os.path.exists(os.path.join(folder, \"sinkdb_cache_{record}.csv\".format(record=record))):\n return {}\n with open(os.path.join(folder, \"sinkdb_cache_{record}.csv\".format(record=record))) as sc:\n csvr = csv.reader(sc)\n return {entry: True if status == \"True\" else False for entry, status in csvr}\n\n\ndef check_a_against_sinkdb(ip_address, sinkdb_a_cache, cache_folder):\n if ip_address in sinkdb_a_cache:\n return sinkdb_a_cache[ip_address]\n try:\n r = requests.post(\"https://sinkdb-api.abuse.ch/api/v1/\", data={\"api_key\": SINKDB_HTTP_API_KEY, \"ipv4\": ip_address})\n answer = r.json()\n if answer[\"query_status\"] == \"no_results\":\n result = False\n elif answer[\"query_status\"] == \"ok\":\n result = any(result[\"source\"] == \"sinkhole\" for result in answer[\"results\"])\n else:\n result = False\n except:\n result = False\n with open(os.path.join(cache_folder, \"sinkdb_cache_a.csv\"), \"a\") as sc:\n sc.write(\"{},{}\\n\".format(ip_address, result))\n return result\n\n\ndef check_ns_against_sinkdb(nameserver, sinkdb_ns_cache, cache_folder):\n if nameserver in sinkdb_ns_cache:\n return sinkdb_ns_cache[nameserver]\n try:\n r = requests.post(\"https://sinkdb-api.abuse.ch/api/v1/\", data={\"api_key\": SINKDB_HTTP_API_KEY, \"domain\": nameserver})\n answer = r.json()\n if answer[\"query_status\"] == \"no_results\":\n result = False\n elif answer[\"query_status\"] == \"ok\":\n result = any(result[\"source\"] == \"sinkhole\" for result in answer[\"results\"])\n else:\n result = False\n except:\n # NXDOMAIN\n result = False\n with open(os.path.join(cache_folder, \"sinkdb_cache_ns.csv\"), \"a\") as sc:\n sc.write(\"{},{}\\n\".format(nameserver, result))\n return result\n\nsinkholes_stamparm_ip, sinkholes_stamparm_ns, _, _ = parse_sinkholes_stamparm(\"20191129\")\n\ndef check_against_stamparm_ip(ip_address):\n return ip_address in sinkholes_stamparm_ip\n\ndef check_against_stamparm_ns(ns):\n return ns in 
sinkholes_stamparm_ns\n\nsinkholes_alowaisheq_ns = parse_sinkholes_alowaisheq_ns()\n\ndef check_against_alowaisheq_ns(ns):\n return ns in sinkholes_alowaisheq_ns\n\ndef load_whois_sinkhole_emails():\n with open(\"sinkhole_emails.txt\") as sem:\n return [mail_address.rstrip() for mail_address in sem if mail_address.rstrip() and not mail_address.startswith(\"#\")]\n\n\ndef check_against_sinkhole_emails(mail_address):\n whois_sinkhole_emails = load_whois_sinkhole_emails()\n return mail_address in whois_sinkhole_emails\n\n\ndef check_all_against_sinkdb(input_file, cache_folder, rrtype):\n sinkdb_a_cache = load_sinkdb_cache(rrtype, cache_folder)\n\n with open(input_file) as input:\n for line in input:\n ip = line.split(\",\")[0]\n if rrtype == \"a\":\n res = check_a_against_sinkdb(ip, sinkdb_a_cache, cache_folder)\n elif rrtype == \"ns\":\n res = check_ns_against_sinkdb(ip, sinkdb_a_cache, cache_folder)\n\n if res == True:\n print(ip, res)\n" }, { "alpha_fraction": 0.6291484832763672, "alphanum_fraction": 0.6502302885055542, "avg_line_length": 55.63210678100586, "blob_id": "5df9279cf6df5be0888faabbb57ca3b25ff8725d", "content_id": "e7699a0b5cf7f91d786b636ef8f326be0cc2b9fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16934, "license_type": "no_license", "max_line_length": 178, "num_lines": 299, "path": "/evaluation_code_and_models/incremental_learning_evaluation.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import dataprocessing.preprocessing as pre\n\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom joblib import load\nimport utils\n\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport itertools\nfrom joblib import dump\n\nfrom sklearn.metrics import confusion_matrix, f1_score, roc_auc_score, precision_score, recall_score, accuracy_score\nfrom evaluation.metrics import workReducedPostLoadThr, workReducedPostDetermineThr, workReducedPostDetermineThrOneGo, \\\n workReducedPostDetermineThrOneGoOneYear, workReducedPost, workReducedPostDetermineThrOneGoBis\nimport dataprocessing.sampleselection as ss\nimport evaluation.postanalysis as postan\nimport macroify\nimport bob.measure\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Train production classifier with some 2018 data. 
This code implements more techniques than described in the NDSS 2020 paper')\n\n parser.add_argument('--strategy', '-st',\n type=str,\n default='random',\n help='How to select the additional samples that have to be added, should be either random')\n\n args = parser.parse_args()\n strategy = args.strategy\n\n # We tried more strategies to select additional data than described in the NDSS paper.\n strategies = {'random': ss.random, 'practical': ss.practical, 'practicalFraction':ss.practicalFraction}\n if strategy == 'random':\n fraction = 0.15\n else:\n fraction = 100\n\n results = {}\n\n method = {'random':ss.random, 'practical':ss.practical, 'practicalFraction':ss.practicalFraction}\n\n available, reputation, dns, whois, openintel, label = pre.loadAndCleanDataPerDataSet(False, '2018')\n total_amount_of_2018_domains = len(available.index)\n\n costs = [0.001, 0.005, 0.01, 0.02]\n workreduceddict = {}\n for c in costs:\n workreduceddict[c] = {}\n workreduceddict[c]['total_fp'] = 0\n workreduceddict[c]['total_fn'] = 0\n workreduceddict[c]['total_manual'] = 0\n workreduceddict[c]['total_pred'] = 0\n workreduceddict[c]['ensemble_predictions'] = []\n workreduceddict[c]['ensemble_labels'] = []\n workreduceddict[c]['ensemble_scores_pos'] = []\n workreduceddict[c]['ensemble_scores_neg'] = []\n workreduceddict[c]['ensemble_predictions_priori'] = []\n workreduceddict[c]['ensemble_labels_priori'] = []\n whoisclassified_domians_dfs = []\n whoisclassified_domains_labels = []\n whoisclassified_domains_prediction = []\n\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzsss = label.map(classDictionary)\n total_amount_2018_positive = labelzsss.sum()\n total_amount_2018_negative = len(labelzsss.index) - labelzsss.sum()\n l = [False,True]\n dfs = []\n codesz = []\n metrics = { 'f1': [], 'precision': [], 'recall': [], 'acc_train': [], 'acc_test': [], 'eer': [], 'fnr_work_reduced': [],\n 'fpr_work_reduced': [], 'work_reduced': [], 'work_reduced_negative': [],\n 'work_reduced_positive': [], 'work_reduced_real':[]}\n\n # Select training data - features2 need to be added to the training set.\n features2, labels2, post_analysis_labels2 = pre.loadAndCleanDataMaxDom('1111', False, '2018')\n\n features2, features_test_domains, labels2, labels_test_domains = \\\n method[strategy](features2, labels2, **{'fraction':fraction, 'code': '1111'})\n\n manual_added_to_trainingset = len(labels2)\n print('From 2018', manual_added_to_trainingset ,'samples are added to the training set')\n labels2 = pd.Series(labels2, index=features2.index)\n\n labels_test_domains = pd.Series(labels_test_domains, index=features_test_domains.index)\n amount_of_test_domains = len(labelzsss) - len(features2)\n total_amount_positive_test = total_amount_2018_positive - labels2.sum()\n total_amount_negative_test = total_amount_2018_negative - (len(labels2.index) - labels2.sum())\n ind_extra_train = features2.index\n\n # save extra_train_indices to drop them when models are used\n dump(ind_extra_train, 'models/' + 'extended' + '/additionaltrainindices.joblib')\n\n for x in itertools.product(l,repeat=4):\n code = ''.join(['1' if i else '0' for i in x])\n if code != '0000':\n # features1 is the 2017 data and is first part of the training set.\n features1, labels1, post_analysis_labels1 = pre.loadAndCleanDataMaxDom(code, False, '2017')\n # select training and testing indices from 'correct' (=abiding model code) featureset\n features3, labels3, post_analysis_labels3 = pre.loadAndCleanDataMaxDom(code, False, '2018')\n labels3 = pd.Series(labels3, 
index=features3.index)\n\n features_extra_train = features3.loc[ind_extra_train]\n labels_extra_train = labels3.loc[ind_extra_train]\n\n features_test = features3.drop(ind_extra_train)\n labels_test = labels3.drop(ind_extra_train)\n\n features_train = pd.concat([features1, features_extra_train])\n labels_train = np.concatenate([labels1, labels_extra_train])\n labels_train_year = np.concatenate([labels1, labels_extra_train*2])\n\n nb_test_domains = len(labels_test)\n nb_test_domains_with_extra_train = len(labels3)\n\n print('Total training set size', len(labels_train))\n\n # Load hyperparameters and train classifier\n clf_tuned = load('models/2017/model' + code + '.joblib')\n if isinstance(clf_tuned, GradientBoostingClassifier):\n params = clf_tuned.get_params()\n clf = GradientBoostingClassifier(**params)\n else:\n params = clf_tuned.best_params_\n clf = GradientBoostingClassifier(**params, random_state=42)\n clf.fit(features_train, labels_train)\n\n #save clf\n dump(clf, 'models/' + 'extended' + '/model' + code + '.joblib')\n\n # Evaluate\n predictions = clf.predict(features_test)\n scores = clf.predict_proba(features_test)\n\n acc = accuracy_score(labels_test, predictions)\n f1 = f1_score(labels_test, predictions)\n prec = precision_score(labels_test, predictions)\n reca = recall_score(labels_test, predictions)\n\n # TODO: choose threshold selection method\n # positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n # work_reduced_positive, lower, upper = \\\n # workReducedPostLoadThr('2017', code, scores, labels_test)\n\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive, thresholdsfnr, thresholdsfpr = \\\n workReducedPostDetermineThrOneGoBis(features_train, labels_train, code, scores, labels_test,\n labels_train_year, costs, plot=True, savemetrics=True, path='extended/' + code + '_')\n\n if code == '1111':\n postan.thresholdsToLatex(path='dfs/extended/1111_workreduced.csv')\n postan.saveimportance(clf.feature_importances_, features_test.columns, 'extended/1111_')\n # positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n # work_reduced_positive, lower, upper = \\\n # workReducedPostDetermineThrOneGoOneYear(features1, labels1, features_extra_train, labels_extra_train, code,\n # scores, labels_test)\n\n print('Manual work for', len(no_action_pred), 'domains. 
This work is on top of the',\n len(labels_extra_train), 'that had to be labeled manually to add them to the trainingset')\n codesz.append(code)\n metrics['acc_test'].append(acc)\n metrics['f1'].append(f1)\n metrics['precision'].append(prec)\n metrics['work_reduced_negative'].append(len(negative_pred) / nb_test_domains)\n metrics['work_reduced_positive'].append(len(positive_pred) / nb_test_domains)\n metrics['work_reduced'].append(len(negative_pred) / nb_test_domains + len(positive_pred) / nb_test_domains)\n metrics['work_reduced_real'].append((nb_test_domains - len(no_action_pred)) / nb_test_domains_with_extra_train)\n metrics['fnr_work_reduced'].append(fnr)\n metrics['fpr_work_reduced'].append(fpr)\n\n # Construct domains that should be classified by this model\n features, labelzz = pre.loadAndCleanDataExactPattern(x, available, reputation, dns, whois, openintel, label)\n iters = features.index.intersection(features_test.index)\n features_to_classify = features_test.loc[iters]\n labelzz = labelzz.loc[iters]\n amount_of_domains = len(features_to_classify.index)\n\n print(amount_of_domains, 'domains to classify for code', code)\n if len(labelzz.index != 0):\n\n scores = clf.predict_proba(features_to_classify)\n predictions = clf.predict(features_to_classify)\n \n if code[2] == '1':\n df = whois.loc[features_to_classify.index]\n whoisclassified_domians_dfs.append(df)\n whoisclassified_domains_labels.append(labelzz.loc[df.index])\n print(type(predictions))\n whoisclassified_domains_prediction.append(predictions)\n\n for i,c in enumerate(costs):\n lower = thresholdsfnr[i]\n upper = thresholdsfpr[i]\n positive_pred, negative_pred, no_action_pred, fnr, fpr, work_reduced, work_reduced_negative, \\\n work_reduced_positive = workReducedPost(lower, upper, scores, labelzz)\n\n workreduceddict[c]['total_fp'] += (len(positive_pred) - sum(positive_pred))\n workreduceddict[c]['total_fn'] += sum(negative_pred)\n workreduceddict[c]['total_manual'] += len(no_action_pred)\n workreduceddict[c]['total_pred'] += (len(positive_pred) + len(negative_pred))\n\n workreduceddict[c]['ensemble_predictions'] = workreduceddict[c]['ensemble_predictions'] + [1] * len(positive_pred) + [0] * len(negative_pred) + no_action_pred\n workreduceddict[c]['ensemble_labels'] = workreduceddict[c]['ensemble_labels'] + positive_pred + negative_pred + no_action_pred\n\n workreduceddict[c]['ensemble_predictions_priori'] = workreduceddict[c]['ensemble_predictions_priori'] + predictions.tolist()\n workreduceddict[c]['ensemble_labels_priori'] = workreduceddict[c]['ensemble_labels_priori'] + labelzz.values.tolist()\n\n workreduceddict[c]['ensemble_scores_neg'] = workreduceddict[c]['ensemble_scores_neg'] + scores[:, 1][labelzz == 0].tolist()\n workreduceddict[c]['ensemble_scores_pos'] = workreduceddict[c]['ensemble_scores_pos'] + scores[:, 1][labelzz == 1].tolist()\n\n # dfs.append(df)\n\n print('Makes a prediction for', (len(positive_pred) + len(negative_pred)), 'domains')\n print('Would predict', np.sum(predictions), 'domains malicious')\n print('=========================================')\n\n # Print performance per model\n print('===============================================================================')\n for key, value in metrics.items():\n if value:\n print('========== %s ============' % (key))\n for i,v in enumerate(value):\n print('Model %s: %.3f' % (codesz[i], v))\n # codestr = utils.translatecode(code)\n # results[key + codestr] = v\n print('===============================================================================')\n\n 
total_fp = workreduceddict[0.02]['total_fp']\n total_fn = workreduceddict[0.02]['total_fn']\n total_manual = workreduceddict[0.02]['total_manual']\n total_pred = workreduceddict[0.02]['total_pred']\n\n # Test set + what has to be added to training set.\n print('Total work reduced real', (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains)\n print('Total FNR', total_fn / total_amount_2018_positive)\n print('Total FPR', total_fp / total_amount_2018_negative)\n # Only test set.\n print('Total work reduced only test set', (amount_of_test_domains-total_manual)/amount_of_test_domains)\n print('Total FNR only test set', total_fn/total_amount_positive_test)\n print('Total FPR only test set', total_fp/total_amount_negative_test)\n\n ensemble_labels = workreduceddict[0.02]['ensemble_labels']\n ensemble_predictions = workreduceddict[0.02]['ensemble_predictions']\n ensemble_labels_priori = workreduceddict[0.02]['ensemble_labels_priori']\n ensemble_predictions_priori = workreduceddict[0.02]['ensemble_predictions_priori']\n ensemble_scores_pos = workreduceddict[0.02]['ensemble_scores_pos']\n ensemble_scores_neg = workreduceddict[0.02]['ensemble_scores_neg']\n\n # FP and FN to file\n df_data = pd.concat(whoisclassified_domians_dfs)\n df_labels = pd.concat(whoisclassified_domains_labels)\n labels_pred = np.concatenate(whoisclassified_domains_prediction)\n postan.saveFpFnDfBis(df_data, df_labels, labels_pred, 'extended/')\n\n np.savez('dfs/' + 'ensemble_extended_det_curve.npz', pos=ensemble_scores_pos, neg=ensemble_scores_neg)\n\n print('AccuracyPosteriori', accuracy_score(ensemble_labels, ensemble_predictions))\n print('F1Posteriori', f1_score(ensemble_labels, ensemble_predictions))\n print('PrecisionPosteriori', precision_score(ensemble_labels, ensemble_predictions))\n print('RecallPosteriori', recall_score(ensemble_labels, ensemble_predictions))\n\n print('Little check', total_amount_2018_positive + total_amount_2018_negative == total_amount_of_2018_domains)\n print('Little check', total_amount_positive_test+total_amount_negative_test == amount_of_test_domains)\n print('Little check', total_pred + total_manual + manual_added_to_trainingset == total_amount_of_2018_domains)\n print('Little check', amount_of_test_domains + manual_added_to_trainingset == total_amount_of_2018_domains)\n\n\n results[strategy + 'workreduced'] = (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains *100\n results[strategy + 'fnr'+ 'posteriori'] = total_fn / total_amount_2018_positive *100\n results[strategy + 'fpr'+ 'posteriori'] = total_fp/total_amount_2018_negative *100\n results[strategy + 'accuracy' + 'posteriori'] = accuracy_score(ensemble_labels, ensemble_predictions) *100\n results[strategy + 'fone' + 'posteriori'] = f1_score(ensemble_labels, ensemble_predictions) *100\n results[strategy + 'precision' + 'posteriori'] = precision_score(ensemble_labels, ensemble_predictions) *100\n results[strategy + 'recall' + 'posteriori'] = recall_score(ensemble_labels, ensemble_predictions) *100\n\n results[strategy + 'accuracy'] = accuracy_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[strategy + 'fone'] = f1_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[strategy + 'precision'] = precision_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n results[strategy + 'recall'] = recall_score(ensemble_labels_priori, ensemble_predictions_priori) * 100\n 
results[strategy + 'eer'] = bob.measure.eer(ensemble_scores_neg, ensemble_scores_pos) * 100\n fpr, fnr = bob.measure.farfrr(ensemble_scores_neg, ensemble_scores_pos, 0.5)\n results[strategy + 'fpr'] = fpr*100\n results[strategy + 'fnr'] = fnr*100\n\n print('Accuracy', accuracy_score(ensemble_labels_priori, ensemble_predictions_priori) * 100)\n print('F1', f1_score(ensemble_labels_priori, ensemble_predictions_priori) * 100)\n print('Precision', precision_score(ensemble_labels_priori, ensemble_predictions_priori) * 100)\n print('Recall', recall_score(ensemble_labels_priori, ensemble_predictions_priori) * 100)\n\n total_manual = workreduceddict[0.01]['total_manual']\n results[strategy + 'WorkReducedOnePercent'] = (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains *100\n total_manual = workreduceddict[0.005]['total_manual']\n results[strategy + 'WorkReducedPointFivePercent'] = (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains *100\n total_manual = workreduceddict[0.001]['total_manual']\n results[strategy + 'WorkReducedPointOnePercent'] = (total_amount_of_2018_domains - total_manual - manual_added_to_trainingset) / total_amount_of_2018_domains *100\n\n\n macroify.append_file(results)\n\n" }, { "alpha_fraction": 0.638336718082428, "alphanum_fraction": 0.6423314213752747, "avg_line_length": 56.27927780151367, "blob_id": "3f2521ca9906892d7d0445ddb0543d6285a5382a", "content_id": "5a6b0d098565d765bf124b35a1cfbcb30282cbe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31792, "license_type": "no_license", "max_line_length": 148, "num_lines": 555, "path": "/evaluation_code_and_models/dataprocessing/preprocessing.py", "repo_name": "DistriNet/avalanche-ndss2020", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom sklearn.preprocessing import Binarizer, OneHotEncoder, LabelBinarizer, LabelEncoder\nfrom sklearn.impute import SimpleImputer\nimport numpy as np\n\nimport os\n\ndef loadDataIntersectSamples(sourcepattern, malwareFamily, year):\n '''Loads the data. Every dataset combination leads contains domains of the most restrictive dataset,\n i.e. 1111 (all datasources available). 
Thus, 1011 will have the same amount of domains as 1111'''\n\n DATAPATH = 'datasets/' + year\n FILENAME1 = 'weka_multi_output_features_all_instances_whois.csv'\n FILENAME2 = 'weka_multi_output_features_all_instances_dnsdb.csv'\n\n whois = pd.read_csv(os.path.join(DATAPATH, FILENAME1))\n whois.index = whois['domain']\n whois.drop(whois.index.duplicated())\n\n label = pd.DataFrame(whois.iloc[:, -1])\n reputation = whois.iloc[:, 0:28]\n openintel = whois.iloc[:, 9:17]\n whois = whois.iloc[:, 28:-1]\n\n print()\n\n dns = pd.read_csv(os.path.join(DATAPATH, FILENAME2), parse_dates=['malware_validity_start', 'malware_validity_end'])\n dns.index = dns['domain']\n dns = dns.drop(dns.index.duplicated())\n dns = dns.iloc[:, 2:13]\n\n #### Open Intel clean up ####\n openintel = pd.concat([openintel, label], axis=1, join='inner')\n openintel = openintel[openintel['openintel_available'] == True]\n # redifine label, as openintel offers least amount of labels\n label = pd.DataFrame(openintel.iloc[:, -1])\n openintel = openintel.drop(['openintel_available', 'class'], axis=1)\n\n more_columns_to_drop = ['openintel_available', 'openintel_first_seen_before_now',\n 'openintel_first_seen_before_validity', 'openintel_nb_days_seen_A',\n 'openintel_nb_days_seen_AAAA', 'openintel_nb_days_seen_MX', 'openintel_nb_days_seen_NS',\n 'openintel_nb_days_seen_SOA']\n reputation = reputation.drop(more_columns_to_drop, axis=1)\n\n ### Dates ###\n reputation['malware_validity_start'] = pd.to_datetime(reputation['malware_validity_start'], unit='s')\n reputation['malware_validity_end'] = pd.to_datetime(reputation['malware_validity_end'], unit='s')\n whois['whois_registration_date'] = pd.to_datetime(whois['whois_registration_date'], unit='s')\n\n # binarize\n if malwareFamily == True:\n to_binarize = reputation.loc[:, ['malware_wordlist_based_dga','ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['malware_wordlist_based_dga', 'ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence', 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n else:\n to_binarize = reputation.loc[:, ['ct_has_certificate', 'topsites_alexa_presence',\n 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n\n # encode categorical feature\n if malwareFamily == True:\n enco = OneHotEncoder()\n categorical = enco.fit_transform(reputation.loc[:, ['malware_family']])\n df = pd.DataFrame(categorical.toarray(), columns=enco.get_feature_names(\n ['malware_family']), index=reputation.index)\n reputation = pd.concat([reputation, df], axis=1)\n reputation = reputation.drop(['malware_family'], axis=1)\n\n # impute search_wayback\n to_impute = reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']] = imputed\n\n #### whois clean 
up ####\n # impute whois_privacy and whois_temporary_mail with Not known\n booleanDictionary = {True: 'TRUE', False: 'FALSE'}\n whois.loc[:, 'whois_privacy'] = whois.loc[:, 'whois_privacy'].map(booleanDictionary)\n whois.loc[:, 'whois_temporary_mail'] = whois.loc[:, 'whois_temporary_mail'].map(booleanDictionary)\n whois.loc[:, 'whois_has_been_renewed'] = whois.loc[:, 'whois_has_been_renewed'].map(booleanDictionary)\n whois.loc[:, 'whois_valid_phone'] = whois.loc[:, 'whois_valid_phone'].map(booleanDictionary)\n to_impute = whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='Not known').fit_transform(to_impute)\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']] = imputed\n\n # categroical features, those that are imputed\n enc = OneHotEncoder()\n categorical = enc.fit_transform(\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']])\n df = pd.DataFrame(categorical.toarray(), columns=enc.get_feature_names(\n ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']), index=whois.index)\n whois = pd.concat([whois, df], axis=1)\n whois = whois.drop(['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone'], axis=1)\n\n # impute with mean whois_registration_age and whois_registration_and_validity_start_date and whois_registration_period\n to_impute = whois.loc[:, ['whois_registration_age', 'whois_registration_and_family_start_date',\n 'whois_registration_and_validity_start_date', 'whois_registration_period']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(to_impute)\n whois.loc[:,\n ['whois_registration_age', 'whois_registration_and_family_start_date', 'whois_registration_and_validity_start_date',\n 'whois_registration_period']] = imputed\n\n #### dsndb clean up ####\n # impute DNS records to False\n to_impute = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=False).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = imputed\n\n # binarize DNS record booleans\n to_binarize = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n binarized = LabelBinarizer().fit_transform(to_binarize)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = binarized\n\n # impute dns nb_queries, active_period\n to_impute = dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']] = imputed\n\n # impute dns timestamps\n to_impute = dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']] = imputed\n\n 
#### Join data ####\n post_analysis_columns = ['malware_family', 'malware_wordlist_based_dga'] + \\\n ['domain', 'malware_validity_length',\n 'topsites_alexa_average_rank', 'topsites_majestic_average_rank',\n 'topsites_quantcast_average_rank', 'topsites_umbrella_average_rank',\n 'malware_validity_start', 'malware_validity_end', 'whois_registration_date', 'whois_registrar']\n # ['openintel_available', 'openintel_first_seen_before_now',\n # 'openintel_first_seen_before_validity', 'openintel_nb_days_seen_A',\n # 'openintel_nb_days_seen_AAAA', 'openintel_nb_days_seen_MX', 'openintel_nb_days_seen_NS',\n # 'openintel_nb_days_seen_SOA']\n datasources = [source for p, source in zip(sourcepattern, [reputation, dns, whois, openintel]) if int(p)]\n columnnames = [cn for p, cn in zip(sourcepattern, [reputation.columns, dns.columns, whois.columns, openintel.columns]) if int(p)]\n post_analysis_columns = [x for x in post_analysis_columns if x in np.concatenate(columnnames)]\n print(len(datasources[0].index))\n print(len(label.index))\n data = pd.concat(datasources + [label], axis=1, join='inner')\n features = data.drop(['class']+post_analysis_columns, axis=1)\n labels = data['class']\n post_analysis_labels = data[post_analysis_columns]\n\n # encode the labels\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n # print(le.classes_)\n # print(\"Benign is \", le.transform([\"benign\"]))\n # print(\"** FINAL COLUMNS: **\")\n # print(features.columns)\n # print(features.shape)\n\n return features, labels, post_analysis_labels\n\ndef loadAndCleanDataMaxDom(sourcepattern, malwareFamily, year, whoisdatacompl=True):\n DATAPATH = 'datasets/' + year\n FILENAME = 'weka_multi_output_features_all_instances_none.csv'\n FILENAME1 = 'weka_multi_output_features_all_instances_whois.csv'\n FILENAME2 = 'weka_multi_output_features_all_instances_dnsdb.csv'\n FILENAME3 = 'use_in_weka.csv'\n\n weka = pd.read_csv(os.path.join(DATAPATH, FILENAME3))\n weka.index = weka['domain']\n available = weka.loc[:, ['dnsdb_available', 'whois_available', 'openintel_available']]\n\n none = pd.read_csv(os.path.join(DATAPATH, FILENAME))\n none.index = none['domain']\n reputation = none.iloc[:, 0:28]\n label = pd.DataFrame(none['class'])\n\n whois = pd.read_csv(os.path.join(DATAPATH, FILENAME1))\n whois.index = whois['domain']\n\n whois = whois.iloc[:, 28:-1]\n openintel = none.iloc[:, 9:17]\n openintel = openintel[openintel['openintel_available'] == True]\n\n print()\n\n dns = pd.read_csv(os.path.join(DATAPATH, FILENAME2), parse_dates=['malware_validity_start', 'malware_validity_end'])\n dns.index = dns['domain']\n dns = dns.iloc[:, 2:13]\n available_dns = pd.concat([dns, available], axis=1, join='inner')\n dns = dns[available_dns['dnsdb_available'] == True]\n\n\n #### Open Intel clean up ####\n # redifine label, as openintel offers least amount of labels\n openintel = openintel.drop(['openintel_available'], axis=1)\n\n #### Reputation clean up ####\n more_columns_to_drop = ['openintel_available', 'openintel_first_seen_before_now',\n 'openintel_first_seen_before_validity', 'openintel_nb_days_seen_A',\n 'openintel_nb_days_seen_AAAA', 'openintel_nb_days_seen_MX', 'openintel_nb_days_seen_NS',\n 'openintel_nb_days_seen_SOA']\n reputation = reputation.drop(more_columns_to_drop, axis=1)\n\n ### Dates ###\n reputation['malware_validity_start'] = pd.to_datetime(reputation['malware_validity_start'], unit='s')\n reputation['malware_validity_end'] = pd.to_datetime(reputation['malware_validity_end'], unit='s')\n whois['whois_registration_date'] = 
pd.to_datetime(whois['whois_registration_date'], unit='s')\n\n # binarize\n if malwareFamily == True:\n to_binarize = reputation.loc[:, ['malware_wordlist_based_dga','ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['malware_wordlist_based_dga', 'ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence', 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n else:\n to_binarize = reputation.loc[:, ['ct_has_certificate', 'topsites_alexa_presence',\n 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n\n # encode categorical feature\n if malwareFamily == True:\n enco = OneHotEncoder()\n categorical = enco.fit_transform(reputation.loc[:, ['malware_family']])\n df = pd.DataFrame(categorical.toarray(), columns=enco.get_feature_names(\n ['malware_family']), index=reputation.index)\n reputation = pd.concat([reputation, df], axis=1)\n reputation = reputation.drop(['malware_family'], axis=1)\n\n # impute search_wayback\n to_impute = reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']] = imputed\n\n #### whois clean up ####\n # impute whois_privacy and whois_temporary_mail with Not known\n booleanDictionary = {True: 'TRUE', False: 'FALSE'}\n whois.loc[:, 'whois_privacy'] = whois.loc[:, 'whois_privacy'].map(booleanDictionary)\n whois.loc[:, 'whois_temporary_mail'] = whois.loc[:, 'whois_temporary_mail'].map(booleanDictionary)\n whois.loc[:, 'whois_has_been_renewed'] = whois.loc[:, 'whois_has_been_renewed'].map(booleanDictionary)\n whois.loc[:, 'whois_valid_phone'] = whois.loc[:, 'whois_valid_phone'].map(booleanDictionary)\n to_impute = whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='Not known').fit_transform(to_impute)\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']] = imputed\n\n # categroical features, those that are imputed\n whoisdatacomplete = whoisdatacompl\n if whoisdatacomplete:\n enc = OneHotEncoder()\n categorical = enc.fit_transform(\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']])\n df = pd.DataFrame(categorical.toarray(), columns=enc.get_feature_names(\n ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']),\n index=whois.index)\n whois = pd.concat([whois, df], axis=1)\n whois = whois.drop(['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone'],\n axis=1)\n else:\n\n enc = OneHotEncoder()\n categorical = enc.fit_transform(\n whois.loc[:, ['whois_privacy', 'whois_valid_phone']])\n df = pd.DataFrame(categorical.toarray(), 
columns=enc.get_feature_names(\n ['whois_privacy', 'whois_valid_phone']), index=whois.index)\n whois = pd.concat([whois, df], axis=1)\n whois = whois.drop(['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone'], axis=1)\n\n # impute with mean whois_registration_age and whois_registration_and_validity_start_date and whois_registration_period\n to_impute = whois.loc[:, ['whois_registration_age', 'whois_registration_and_family_start_date',\n 'whois_registration_and_validity_start_date', 'whois_registration_period']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(to_impute)\n whois.loc[:,\n ['whois_registration_age', 'whois_registration_and_family_start_date', 'whois_registration_and_validity_start_date',\n 'whois_registration_period']] = imputed\n\n #### dsndb clean up ####\n # impute DNS records to False\n to_impute = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=False).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = imputed\n\n # binarize DNS record booleans\n to_binarize = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n binarized = LabelBinarizer().fit_transform(to_binarize)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = binarized\n\n # impute dns nb_queries, active_period\n to_impute = dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']] = imputed\n\n # impute dns timestamps\n to_impute = dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']] = imputed\n\n #### Join data ####\n post_analysis_columns = ['malware_family', 'malware_wordlist_based_dga'] + \\\n ['domain', 'malware_validity_length',\n 'topsites_alexa_average_rank', 'topsites_majestic_average_rank',\n 'topsites_quantcast_average_rank', 'topsites_umbrella_average_rank',\n 'malware_validity_start', 'malware_validity_end', 'whois_registration_date', 'whois_registrar']\n # ['openintel_available', 'openintel_first_seen_before_now',\n # 'openintel_first_seen_before_validity', 'openintel_nb_days_seen_A',\n # 'openintel_nb_days_seen_AAAA', 'openintel_nb_days_seen_MX', 'openintel_nb_days_seen_NS',\n # 'openintel_nb_days_seen_SOA']\n datasources = [source for p, source in zip(sourcepattern, [reputation, dns, whois, openintel]) if int(p)]\n columnnames = [cn for p, cn in\n zip(sourcepattern, [reputation.columns, dns.columns, whois.columns, openintel.columns]) if int(p)]\n post_analysis_columns = [x for x in post_analysis_columns if x in np.concatenate(columnnames)]\n data = pd.concat(datasources + [label], axis=1, join='inner')\n features = data.drop(['class'] + post_analysis_columns, axis=1)\n labels = data['class']\n post_analysis_labels = 
data[post_analysis_columns]\n\n # encode the labels\n le = LabelEncoder()\n labels = le.fit_transform(labels)\n\n # print(le.classes_)\n # print(\"Benign is \", le.transform([\"benign\"]))\n # print(\"** FINAL COLUMNS: **\")\n # print(features.columns)\n # print(features.shape)\n\n return features, labels, post_analysis_labels\n\ndef loadAndCleanDataPerDataSet(malwareFamily, year, whoisdatacompl=True):\n '''\n Contains all data\n :param malwareFamily: whether to include malware family as a feature\n :param year: dataset\n :return:\n '''\n DATAPATH = 'datasets/' + year\n FILENAME = 'weka_multi_output_features_all_instances_none.csv'\n FILENAME1 = 'weka_multi_output_features_all_instances_whois.csv'\n FILENAME2 = 'weka_multi_output_features_all_instances_dnsdb.csv'\n FILENAME3 = 'use_in_weka.csv'\n\n weka = pd.read_csv(os.path.join(DATAPATH, FILENAME3))\n weka.index = weka['domain']\n weka = weka.drop_duplicates()\n available = weka.loc[:,['dnsdb_available', 'whois_available', 'openintel_available']]\n\n none = pd.read_csv(os.path.join(DATAPATH, FILENAME))\n none.index = none['domain']\n none = none.drop_duplicates()\n none = none.loc[none['ct_has_certificate'].isnull()==False]\n label = none.iloc[:,-1]\n reputation = none.iloc[:, 0:28]\n\n whois = pd.read_csv(os.path.join(DATAPATH, FILENAME1))\n whois.index = whois['domain']\n whois = whois.drop_duplicates()\n\n # label = pd.DataFrame(whois.iloc[:, -1])\n openintel = none.iloc[:, 9:17]\n whois = whois.iloc[:, 28:-1]\n\n dns = pd.read_csv(os.path.join(DATAPATH, FILENAME2), parse_dates=['malware_validity_start', 'malware_validity_end'])\n dns.index = dns['domain']\n dns = dns.drop_duplicates()\n dns = dns.iloc[:, 2:13]\n ind_intersection = available[available['dnsdb_available']==True].index.intersection(dns.index)\n dns = dns.loc[ind_intersection]\n\n\n #### Open Intel clean up ####\n openintel = openintel[openintel['openintel_available'] == True]\n # redifine label, as openintel offers least amount of labels\n # label = pd.DataFrame(openintel.iloc[:, -1])\n openintel = openintel.drop(['openintel_available'], axis=1)\n\n\n more_columns_to_drop = ['openintel_available', 'openintel_first_seen_before_now',\n 'openintel_first_seen_before_validity', 'openintel_nb_days_seen_A',\n 'openintel_nb_days_seen_AAAA', 'openintel_nb_days_seen_MX', 'openintel_nb_days_seen_NS',\n 'openintel_nb_days_seen_SOA', 'malware_family', 'malware_wordlist_based_dga',\n 'topsites_alexa_average_rank', 'topsites_majestic_average_rank',\n 'topsites_quantcast_average_rank', 'topsites_umbrella_average_rank',\n 'malware_validity_start', 'malware_validity_end', 'domain', 'malware_validity_length'\n ]\n reputation = reputation.drop(more_columns_to_drop, axis=1)\n\n ### Dates ###\n # reputation['malware_validity_start'] = pd.to_datetime(reputation['malware_validity_start'], unit='s')\n # reputation['malware_validity_end'] = pd.to_datetime(reputation['malware_validity_end'], unit='s')\n whois['whois_registration_date'] = pd.to_datetime(whois['whois_registration_date'], unit='s')\n\n # binarize\n if malwareFamily == True:\n to_binarize = reputation.loc[:, ['malware_wordlist_based_dga','ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['malware_wordlist_based_dga', 'ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence', 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n 
else:\n to_binarize = reputation.loc[:, ['ct_has_certificate', 'topsites_alexa_presence',\n 'topsites_majestic_presence',\n 'topsites_quantcast_presence', 'topsites_umbrella_presence']]\n binarized = Binarizer().transform(to_binarize)\n reputation.loc[:,\n ['ct_has_certificate', 'topsites_alexa_presence', 'topsites_majestic_presence',\n 'topsites_quantcast_presence',\n 'topsites_umbrella_presence']] = binarized\n\n # encode categorical feature\n if malwareFamily == True:\n enco = OneHotEncoder()\n categorical = enco.fit_transform(reputation.loc[:, ['malware_family']])\n df = pd.DataFrame(categorical.toarray(), columns=enco.get_feature_names(\n ['malware_family']), index=reputation.index)\n reputation = pd.concat([reputation, df], axis=1)\n reputation = reputation.drop(['malware_family'], axis=1)\n\n # impute search_wayback\n to_impute = reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n reputation.loc[:, ['search_pages_found_wayback_machine', 'search_wayback_machine_first_seen_before_now',\n 'search_wayback_machine_first_seen_before_validity']] = imputed\n\n #### whois clean up ####\n # impute whois_privacy and whois_temporary_mail with Not known\n booleanDictionary = {True: 'TRUE', False: 'FALSE'}\n whois.loc[:, 'whois_privacy'] = whois.loc[:, 'whois_privacy'].map(booleanDictionary)\n whois.loc[:, 'whois_temporary_mail'] = whois.loc[:, 'whois_temporary_mail'].map(booleanDictionary)\n whois.loc[:, 'whois_has_been_renewed'] = whois.loc[:, 'whois_has_been_renewed'].map(booleanDictionary)\n whois.loc[:, 'whois_valid_phone'] = whois.loc[:, 'whois_valid_phone'].map(booleanDictionary)\n to_impute = whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='Not known').fit_transform(to_impute)\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']] = imputed\n\n # categroical features, those that are imputed\n whoisdatacomplete = whoisdatacompl\n if whoisdatacomplete:\n enc = OneHotEncoder()\n categorical = enc.fit_transform(\n whois.loc[:, ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']])\n df = pd.DataFrame(categorical.toarray(), columns=enc.get_feature_names(\n ['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone']),\n index=whois.index)\n whois = pd.concat([whois, df], axis=1)\n whois = whois.drop(['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone'],\n axis=1)\n else:\n\n enc = OneHotEncoder()\n categorical = enc.fit_transform(\n whois.loc[:, ['whois_privacy', 'whois_valid_phone']])\n df = pd.DataFrame(categorical.toarray(), columns=enc.get_feature_names(\n ['whois_privacy', 'whois_valid_phone']), index=whois.index)\n whois = pd.concat([whois, df], axis=1)\n whois = whois.drop(['whois_privacy', 'whois_temporary_mail', 'whois_has_been_renewed', 'whois_valid_phone'],\n axis=1)\n whois = whois.drop(['whois_registration_date', 'whois_registrar'], axis=1)\n\n # impute with mean whois_registration_age and whois_registration_and_validity_start_date and whois_registration_period\n to_impute = whois.loc[:, ['whois_registration_age', 'whois_registration_and_family_start_date',\n 
'whois_registration_and_validity_start_date', 'whois_registration_period']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='mean').fit_transform(to_impute)\n whois.loc[:,\n ['whois_registration_age', 'whois_registration_and_family_start_date', 'whois_registration_and_validity_start_date',\n 'whois_registration_period']] = imputed\n\n #### dsndb clean up ####\n # impute DNS records to False\n to_impute = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=False).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = imputed\n\n # binarize DNS record booleans\n to_binarize = dns.loc[:,\n ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']]\n binarized = LabelBinarizer().fit_transform(to_binarize)\n dns.loc[:, ['dnsdb_record_A', 'dnsdb_record_AAAA', 'dnsdb_record_CNAME', 'dnsdb_record_MX', 'dnsdb_record_NS',\n 'dnsdb_record_SOA', 'dnsdb_record_TXT']] = binarized\n\n # impute dns nb_queries, active_period\n to_impute = dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_active_period', 'dnsdb_nb_queries']] = imputed\n\n # impute dns timestamps\n to_impute = dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']]\n imputed = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0).fit_transform(to_impute)\n dns.loc[:, ['dnsdb_first_seen_before_now', 'dnsdb_first_seen_before_validity']] = imputed\n\n return available, reputation, dns, whois, openintel, label\n\ndef loadAndCleanDataExactPattern(code, available, reputation, dns, whois, openintel, label):\n if code[0]:\n df = available.loc[(available['dnsdb_available'] == code[1]) &\n (available['whois_available'] == code[2]) &\n (available['openintel_available'] == code[3])]\n datasets = [ds for ds, i in zip([dns, whois, openintel], code[1:]) if i]\n features = pd.concat([reputation] + datasets + [df], axis=1, join='inner')\n features = features.drop(['dnsdb_available', 'whois_available', 'openintel_available'], axis=1)\n\n labelzz = pd.concat([label, df], axis=1, join='inner')\n labelzz = labelzz.loc[:, 'class']\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzz = labelzz.map(classDictionary)\n else:\n features = pd.DataFrame()\n labelzz = pd.Series()\n\n\n return features.sort_index(), labelzz.sort_index()\n\ndef loadAndCleanDataExactPatternAlt(code, available, reputation, dns, whois, openintel, label):\n\n df = available.loc[(available['reputation_available'] == code[0]) &\n (available['dnsdb_available'] == code[1]) &\n (available['whois_available'] == code[2]) &\n (available['openintel_available'] == code[3])]\n datasets = [ds for ds, i in zip([reputation, dns, whois, openintel], code) if i]\n features = pd.concat(datasets + [df], axis=1, join='inner')\n features = features.drop(['reputation_available','dnsdb_available', 'whois_available', 'openintel_available'], axis=1)\n\n labelzz = pd.concat([label, df], axis=1, join='inner')\n labelzz = labelzz.loc[:, 'class']\n classDictionary = {'malicious': 1, 'benign': 0}\n labelzz = labelzz.map(classDictionary)\n\n 
return features.sort_index(), labelzz.sort_index()\n\n\n" } ]
19
sumit026/anagram_engine
https://github.com/sumit026/anagram_engine
03e2716d5600cfa6d6a8e7f2edbe55ceeed9af73
128e4822e41cfc7fb6113c676393a1351463915a
c440b36ce9dc41e5bd475b908daf43beb947e54a
refs/heads/master
2022-11-12T21:11:33.328401
2020-07-09T11:19:27
2020-07-09T11:19:27
278,341,549
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6144455671310425, "alphanum_fraction": 0.617497444152832, "avg_line_length": 24.86842155456543, "blob_id": "3697e3e38d1f01340ba9ca9ffce3de83d147bb21", "content_id": "9deda5992e313faee463fd49b374c98f35749b28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 983, "license_type": "no_license", "max_line_length": 76, "num_lines": 38, "path": "/main.py", "repo_name": "sumit026/anagram_engine", "src_encoding": "UTF-8", "text": "from google.appengine.ext import ndb\nimport webapp2\nimport logging\nimport template\nimport function\nfrom anagram import Anagram\nfrom add import Add\nfrom search import Search\nfrom upload import Upload\nfrom subanagram import SubAnagram\n\n\nclass MainPage(webapp2.RequestHandler):\n def get(self):\n logging.debug(\"GET\")\n self.response.headers['Content-Type'] = 'text/html'\n my_user = function.userKey()\n \n if function.userLoggedIn():\n if not function.userExist():\n function.newUser(function.currentUser())\n\n template.main(self, function.logoutUrl(self), my_user,\n function.usersAnagrams(function.userKey()))\n\n else:\n template.login(self, function.loginUrl(self))\n\n\n\napp = webapp2.WSGIApplication(\n [\n ('/', MainPage),\n ('/add', Add),\n ('/search', Search),\n ('/upload', Upload),\n ('/subanagram', SubAnagram),\n ], debug=True)\n" }, { "alpha_fraction": 0.5831435322761536, "alphanum_fraction": 0.5854213833808899, "avg_line_length": 37.130435943603516, "blob_id": "bb1a39664b75fe34d9ee5259b9d702fe8f78b704", "content_id": "43ef586ded56163d580c008ae53e8adbe4f99764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 97, "num_lines": 23, "path": "/search.py", "repo_name": "sumit026/anagram_engine", "src_encoding": "UTF-8", "text": "from google.appengine.ext import ndb\nimport webapp2\nimport logging\nimport template\nimport function\nfrom anagram import Anagram\n\n\nclass Search(webapp2.RequestHandler):\n def get(self):\n logging.debug(\"GET\")\n self.response.headers['Content-Type'] = 'text/html'\n my_user = function.userKey()\n if function.userLoggedIn():\n if not function.userExist():\n function.newUser(function.currentUser())\n \n template.searchtext(self, function.logoutUrl(self), my_user,\n function.lexicographical_order(self.request.get('value')),\n function.inputResult(self.request.get('value')),\n function.usersAnagrams(function.userKey()))\n else:\n template.login(self, function.loginUrl(self))\n\n" }, { "alpha_fraction": 0.5864709615707397, "alphanum_fraction": 0.5877472758293152, "avg_line_length": 31.625, "blob_id": "119d28a648fae30458653cb2ffe5f788dab0ace3", "content_id": "4373e71b3bbf5555ecc8bd7eb7f2b136616b2415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1567, "license_type": "no_license", "max_line_length": 79, "num_lines": 48, "path": "/add.py", "repo_name": "sumit026/anagram_engine", "src_encoding": "UTF-8", "text": "from google.appengine.ext import ndb\nimport webapp2\nimport logging\nimport template\nimport function\nfrom anagram import Anagram\n\n\nclass Add(webapp2.RequestHandler):\n def get(self):\n logging.debug(\"GET\")\n self.response.headers['Content-Type'] = 'text/html'\n my_user = function.userKey()\n if function.userLoggedIn():\n if not function.userExist():\n function.newUser(function.currentUser())\n \n template.add(self, function.logoutUrl(self), my_user,\n 
function.usersAnagrams(function.userKey()))\n\n else:\n template.login(self, function.loginUrl(self))\n\n def post(self):\n logging.debug(\"POST\")\n self.response.headers['Content-Type'] = 'text/html'\n\n my_user = function.userKey()\n button = self.request.get('button')\n input_text = function.inputResult(self.request.get('value'))\n logging.debug(input_text)\n logging.debug(button)\n\n if button == 'Add':\n self.add(input_text, my_user)\n self.redirect('/add')\n\n\n def add(self, text, my_user):\n logging.debug('Add ' + text) \n if text is not None or text != '':\n anagram_id = function.lexicographical_order(text)\n anagram_key = ndb.Key(Anagram, anagram_id)\n anagrams = anagram_key.get()\n if anagrams:\n function.add_anagram(my_user, text, anagram_key) \n else:\n function.add_newAnagram(my_user, text, anagram_id, anagram_key)\n\n" }, { "alpha_fraction": 0.6286817789077759, "alphanum_fraction": 0.6306132078170776, "avg_line_length": 26.613332748413086, "blob_id": "fef70b8490e5ea306e085cfacaac840ad26b371a", "content_id": "3b75e7d9e15bef23532c304af701a9211acde5a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 77, "num_lines": 75, "path": "/template.py", "repo_name": "sumit026/anagram_engine", "src_encoding": "UTF-8", "text": "import jinja2\nimport os\nimport function\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__) + \"/templates\"),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\n\ndef login(self, url):\n template_values = {'url': url}\n\n template = JINJA_ENVIRONMENT.get_template('login.html')\n self.response.write(template.render(template_values))\n\n\ndef main(self, url, my_user, anagrams):\n template_values = {\n 'url': url,\n 'user': function.currentUser(),\n 'my_user': my_user,\n 'anagrams': anagrams,\n }\n\n template = JINJA_ENVIRONMENT.get_template('main.html')\n self.response.write(template.render(template_values))\n\ndef add(self, url, my_user, anagrams):\n template_values = {\n 'url': url,\n 'user': function.currentUser(),\n 'my_user': my_user,\n 'anagrams': anagrams,\n }\n\n template = JINJA_ENVIRONMENT.get_template('add.html')\n self.response.write(template.render(template_values))\n\n\ndef searchtext(self, url, my_user, value, input_text, anagrams):\n template_values = {\n 'url': url,\n 'user': function.currentUser(),\n 'my_user': my_user,\n 'value': value,\n 'input_text': input_text,\n 'anagrams': anagrams,\n }\n\n template = JINJA_ENVIRONMENT.get_template('search.html')\n self.response.write(template.render(template_values))\n\ndef upload(self, url, my_user, anagrams):\n template_values = {\n 'url': url,\n 'user': function.currentUser(),\n 'my_user': my_user,\n 'anagrams': anagrams,\n }\n\n template = JINJA_ENVIRONMENT.get_template('fileupload.html')\n self.response.write(template.render(template_values))\n\ndef subanagram(self, url, my_user, value, anagrams):\n template_values = {\n 'url': url,\n 'user': function.currentUser(),\n 'my_user': my_user,\n 'value': value,\n 'anagrams': anagrams,\n }\n\n template = JINJA_ENVIRONMENT.get_template('subanagram.html')\n self.response.write(template.render(template_values))\n" } ]
4
DeepakAkkara/COVID-19-Severity-Analysis
https://github.com/DeepakAkkara/COVID-19-Severity-Analysis
71c7335b394590d4931b0c12509047288c6b1410
6de3a8f61d3d50a82fd8bd38adb2b6506d1e078c
ebbff0c9d1b0f4023278f03d69779e166b88fbe1
refs/heads/master
2023-07-22T13:26:56.790331
2020-12-07T04:30:04
2020-12-07T04:30:04
405,573,058
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6939793229103088, "alphanum_fraction": 0.7011044025421143, "avg_line_length": 28.868131637573242, "blob_id": "f3f1f6fa478c4cab60bf24c68519c705667e268c", "content_id": "d1b974ddb325cacd3596f222b63ab2ecac7697c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2807, "license_type": "no_license", "max_line_length": 158, "num_lines": 91, "path": "/Midterm_report.py", "repo_name": "DeepakAkkara/COVID-19-Severity-Analysis", "src_encoding": "UTF-8", "text": "#Deepak - PCA on Numerical data (not one-hot encoded data)\r\nfrom sklearn.decomposition import PCA\r\ndf\r\n#lets find all the columns with numerical (non-categorical data)\r\ncols_with_numeric_data = [\"age\", \"Ht\", \"Wgt\", \"BMI\", \"RRadmit\", \"HRadmit\", \"Systolic\",\"Diastolic\", \"tempadmit\", \"O2admit\", \"OnsetDays\", \"Num_COVID_Symptoms\", \r\n\"Num_Other_Risk_Factor\", \"WBC\", \"Lympho\", \"Hg\", \"Plts\", \"AST\", \"Ddimer\", \"LDH\", \"CRP_Max_19k\", \"Ferrit\", \"Low02D1\", \"AvgMaxTemp\", \"LOS\"]\r\ncols_with_categorical_data = []\r\nall_cols = list(df)\r\n# print(cols_with_numeric_data)\r\n# for i in cols_with_numeric_data:\r\n# print(df[i])\r\n# print(all_cols)\r\nfor i in all_cols:\r\n if i not in cols_with_numeric_data:\r\n cols_with_categorical_data.append(i)\r\n# print(cols_with_categorical_data)\r\n\r\nnumeric_df = df[cols_with_numeric_data]\r\ncategorical_df = df[cols_with_categorical_data]\r\nnumeric_df\r\n#numeric df is df with only numeric columns\r\npca = PCA(n_components=5)\r\npca.fit(numeric_df)\r\n#print(pca_df)\r\n#pca.transform(numeric_df)\r\npca_df = pd.DataFrame(pca.components_) \r\npca_df\r\n\r\n\r\n#plot the variance\r\npca.n_components = 25\r\npca_data = pca.fit_transform(numeric_df)\r\npercent_var_explained = pca.explained_variance_ / np.sum(pca.explained_variance_)\r\ncum_var_explained = np.cumsum(percent_var_explained)\r\n\r\n#plotting section\r\nplt.figure(1, figsize = (6,4))\r\nplt.clf()\r\nplt.plot(cum_var_explained, linewidth = 2)\r\nplt.axis('tight')\r\nplt.grid()\r\nplt.xlabel('n_components')\r\nplt.ylabel('Cumulative_explained_variance')\r\nplt.show()\r\n\r\n#Next\r\nlabels = list(numeric_df)\r\npca.n_components = 25\r\nlabeled_data = pca.fit_transform(numeric_df)\r\nlabeled_data = np.vstack((labeled_data.T, labels)).T\r\nlabeled_data\r\n\r\n\r\n\r\n#Deepak PCA projection\r\nfrom yellowbrick.features import ParallelCoordinates\r\nfrom yellowbrick.datasets import load_occupancy\r\n\r\n\r\n# Load the classification data set\r\n#X, y = load_occupancy()\r\nX = numericals[['Ddimer', 'WBC', 'age', 'LOS']]\r\ny = numericals['Death_ICU']\r\n#print(X[0].shape)\r\nprint(y[0].shape)\r\n# Specify the features of interest and the classes of the target\r\nfeatures = [\"Ddimer\", \"WBC\", \"age\", \"LOS\"]\r\nclasses = [\"alive\", \"dead\"]\r\n\r\n# Instantiate the visualizer\r\nvisualizer = ParallelCoordinates(classes=classes, features=features, sample=0.05, shuffle=True)\r\n\r\n# Fit and transform the data to the visualizer\r\nvisualizer.fit_transform(X, y)\r\n\r\n# Finalize the title and axes then display the visualization\r\nvisualizer.show()\r\n\r\n\r\n#from yellowbrick.datasets import load_credit\r\n# from yellowbrick.features import PCA as prinCompAnal\r\n\r\n# # Specify the features of interest and the target\r\n# #X, y = load_credit()\r\n# #classes = ['account in default', 'current with bills']\r\n# X = numericals\r\n# y = numericals['Death_ICU']\r\n\r\n# visualizer = PCA()\r\n# visualizer.fit_transform(X, 
y)\r\n# visualizer.show()" }, { "alpha_fraction": 0.7890339493751526, "alphanum_fraction": 0.7971279621124268, "avg_line_length": 100.1660385131836, "blob_id": "3a394b20043c8ae83afb74e5d77287dc4489f151", "content_id": "5df124130a672e3ee8c7f6d1aaeea45baa8e3c61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 26844, "license_type": "no_license", "max_line_length": 989, "num_lines": 265, "path": "/README.md", "repo_name": "DeepakAkkara/COVID-19-Severity-Analysis", "src_encoding": "UTF-8", "text": "# **COVID-19 Case Severity Analysis**\n<p align=\"center\">\n <img src=\"assets/Design%20Process.png\" width=50%/>\n</p>\n\n## **Introduction**\nWe are going to be analyzing how different factors affect the severity of COVID-19 in an infected patient.\nWe will be looking primarily at pre-existing diseases, as this is theorized to increase the chance of\nserious illness or even death from coronavirus. In addition, we will also take into account how\ndemographic factors like age, race, sex, and financial status affect the severity.\nWe know that having a pre-existing disease will increase the chance of medical problems related to coronavirus,\nbut we do not know how much each disease affects the severity. We hope this intersection of\npre-existing diseases and demographics with severity of illness in COVID-19 patients will lead to insightful information about the virus. \n\n\n## **Background**\nHospitals have been overcrowded with COVID patients since the pandemic started.\nCOVID is a deadly virus that has killed over 1 million worldwide and 208,000 people in the United States,\nand these numbers will continue to increase.\nToo many people are dying, so we need to minimize these deaths as much as possible by prioritizing beds for the most vulnerable. \nOur goal is to produce an algorithm that can assign patients a severity level based on factors such as age, sex, race, and pre-existing conditions.\nThis will help guide the hospitals in determining who to prioritize when there is a shortage of beds.\nDuring the semester, we hope to be able to determine which conditions will leave somebody the\nmost vulnerable to severe complications or even death, and we hope hospitals can use this information to assign beds to those people,\nand in the long run, save lives.\n\n## **Methods**\n\n### Unsupervised Learning\nIn any machine learning task, the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) is\nsomething that must be dealt with.\nWhat this means is that as the dimensionality (or number of features in the dataset) increases,\nthe volume of the feature space in which we're working increases rapidly.\nAs a result of this, it becomes necessary to reduce the dimension into a context that we can more easily work with.\nBy reducing the amount of dimensions/features we have to process, we are able to analyze fewer relationships between\nfeatures and reduce the computational power needed and the likelihood of overfitting.\n\n#### [Pair Plots](https://seaborn.pydata.org/generated/seaborn.pairplot.html)\nPair plots are a useful unsupervised visualization for determining pairwise relationships among features in a dataset. Each graph in a pairplot is a scatterplot of data points only considering each pair of features. On the diagonal, a univariate distribution of each feature is shown instead. 
\n\nWe decided to create pairplots for all numerical columns in the dataset, since continuous values can be plotted much more easily on a pairplot than one-hot encoded categorical features.\n\n#### [Correlation Matrix](https://en.wikipedia.org/wiki/Correlation_and_dependence#Correlation_matrices)\nOne of the most important unsupervised techniques we used was creating a correlation matrix of all of our numerical data. This allows us to see which features are most strongly correlated with one another, helping us eliminate redundancy in the future as well as find the columns most strongly correlated with ending up in the ICU or dying.\n\n#### [Principal Component Analysis](https://en.wikipedia.org/wiki/Principal_component_analysis)\nA common algorithm in dimensionality reduction is principal component analysis (PCA). \nWe ran principal component analysis on the numerical data that we had; however the results were not very useful to us and we did not pursue the algorithm further. Our project involves determining which features increase the probability of dying or being severely impacted (ICU) by COVID-19, but PCA projects our feature set onto a new basis entirely, which does not allow us to select the most important original features.\n\n\n#### [t-SNE](https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding)\nt-distributed stochastic neighbor embedding is another dimensionality reduction technique, typically used to provide easy 2D or 3D\nvisualizations of high dimensional data, which is extremely useful for interpreting results visually. It works by constructing a similarity probability distribution for each pair of points using Euclidean distance, then mapping each point into a smaller feature space that best represents the similarity distribution.\n\n#### [K-Means Clustering](https://en.wikipedia.org/wiki/K-means_clustering)\nWe used the k-means clustering algorithm on our data after projecting into a 2D space for visualizations using t-SNE. Since we know our non-target features come from two latent classes (those who ended up in the ICU and those who didn’t), we decided to use 2 clusters and visualize the k-means clusters in the dimensionality reduced data.\n\n### Supervised Learning\nThe point of any machine learning task is to get some actionable results out of the data that we put in,\nand supervised learning will help us achieve that goal.\nThe main methods being considered are chi-squared feature selection and various naive Bayes classifiers.\n\n#### [Chi-Squared Feature Selection](https://towardsdatascience.com/chi-square-test-for-feature-selection-in-machine-learning-206b1f0b8223)\nAfter conducting unsupervised analysis on the numerical data, we wanted to perform analysis on the qualitative data and determined that the chi-squared test for feature selection would be helpful in figuring out which variables are dependent with whether or not a patient lives or dies. We ran sklearn.feature_selection.chi2 on the data with the “died” feature as the target parameter and plotted the results with the highest chi-squared statistics.\n\n#### [Naive Bayes Classification and Posterior Probability Prediction](https://scikit-learn.org/stable/modules/naive_bayes.html)\n\nSince our ultimate goal is to predict a COVID-19 patient's prognosis, we decided to use a number of variations of Naive Bayes classifiers to predict whether a patient will either die or end up in the ICU. 
Naive Bayes classifiers use the \"naive Bayesian assumption\" that all features are conditionally independent given the datapoint's classification; that is to say, all symptoms, preexisting conditions, comorbidities, and demographics are conditionally independent given that we know whether the patient either died or is in the ICU. Using this assumption, we can predict posterior probabilities of belonging to either class label, and use these to classify a testing set. Naive Bayes classifiers are typically used for text classification, but the size of our dataset and types of our features led us to believe that they would be a viable option.\n\n##### Multinomial Naive Bayes\nMultinomial Naive Bayes classifiers are the most commonly used version of naive Bayes classifiers, and they assume that the data is multinomially-distributed. [scikit-learn's implementation](https://scikit-learn.org/stable/modules/generated/sklearn.naive_bayes.MultinomialNB.html) of the multinomial classifier is intended for use with a \"bag of words\" representation of text-based data, in which each column represents a word and a datapoint's value in that column represents the number of times that word appears. We chose to test this implementation on the continuous numerical features in our dataset. \n\n##### Complement Naive Bayes\nComplement Naive Bayes classifiers are a modification on multinomial classifiers that are specifically designed to deal with *class imbalance*; if the probability of a specific ground-truth categorization appearing dominates the probability of other categorizations, then the training set is *imbalanced*. Multinomial naive Bayes classifiers tend to perform very poorly when trained with imbalanced data because they overrepresent the probability of the more common class appearing in testing data. Since our training data had minor class imbalance, we chose to try using complement naive Bayes as well on our numerical data.\n\n##### Bernoulli Naive Bayes\nBernoulli Naive Bayes classifiers assume that the data is distributed according to multivariate Bernoulli distributions -- this means that each feature is a boolean-valued feature, where the feature is either present or absent. These classifiers are typically used for text-based classification where each column represents a word, and each datapoint's value in that column is either 0 (indicating the word is absent from the document) or 1 (indicating the word is present). We used this classifier on all of our one-hot encoded, categorical features in our data.\n\n## **Results**\n\n### **Unsupervised Results**\n****************************\nUpon downloading our dataset, which was composed of patient-by-patient data describing things like sex, age, preexisting conditions, and symptoms, the first thing we had to do was make it suitable for machine learning methods in general. This means we had to eliminate columns/features that were extraneous or unrelated to our problem (such as factors which would be unknown at the time of arrival to the hospital and factors which were uniform over all patients). Then, we used pandas to convert our dataset into a dataframe, encoded categorical data into a one-hot format, and normalized data for use in a correlation map. Next, we moved on to performing key unsupervised learning techniques on our dataset, such as visualizations (correlation plots and heatmaps) and clustering (K-means). 
These techniques provided insight into the structure of our data, what features correlated with others, what we could do to make supervised learning easier, and how the data clustered in its space.\n\n#### Pairplots\n<p align=\"center\">\n <img src=\"assets/pairplot clean.png\" width=50%/>\n <br>\n Pairplots showing correlations and relationships between all numerical features and delimited by death/ICU. Orange patients died and/or were in the ICU.\n (click for more)\n</p>\n\n<p align=\"center\">\n <img src=\"assets/pairplot detail.png\" width=50%/>\n <br>\n Pairplot detail\n</p>\n\nWhat these pair plots showed is not only the relationships of numerical factors with each other,\nbut equally importantly, the univariate distributions of these factors split up based on class. \nAs shown, the distributions can help decide the relative importance of each factor by\nshowing discrepancies between distributions for dead/seriously-affected patients and non-ICU patients.\nThere were a number of pairplots which revealed some insight. For example, the length of stay (LOS) feature, shows that on average, patients who stayed in the hospital longer tended to be more likely to die and/or end up in the ICU. \n\nIn addition, we can see extremes in different categories in which almost everyone who had above or below a threshold in a specific category had the same outcome. For example, out of the 9 people who had a Low02D1 value less than 82, 8 of them had a severe case of COVID-19. Similarly, every patient with an O2 level below 80 at hospital admission had a severe case of COVID-19. Also, every patient with a ferritin value of over 4000, avoided both dying and going to the ICU. \n\nFinally, there were a few plots where it appeared a combination of factors resulted in a higher chance of a severe case of COVID-19. Patients with a higher number of ‘Num_Other_Risk_Factors’ were more likely to die and patients who were older (had a higher value in the age feature). In the pair plot between these two categories there was a slight positive relationship between the features.\n\n\n#### Correlation Matrix\n<p align=\"center\">\n <img src=\"assets/numcorr.png\" width=50%/>\n <br>\n Correlation for numerical data\n</p>\n\nWe computed correlations only among numeric features, as well as our one target categorical feature, which represents whether a patient ended up in the ICU or died, because correlation plots do not capture relationships between one-hot encoded data very well. Those features with the highest correlation are the most likely to be redundant in some fashion, which can help us perform unsupervised feature selection. For example, notice that systolic blood pressure and diastolic blood pressure have a high correlation, so we may choose to eliminate one or combine them if we need to reduce features. This was also helpful in determining which numeric features are most strongly correlated with dying or having to go to the ICU. For example, age, length of stay (LOS), and D-dimer were all relatively correlated with death and ICU visits. 
\n\nThis visualization was important because it allowed us to see what factors had the most\ninfluence or correlation with the latent variables.\nBecause certain factors are more important, we can cut off extraneous factors and create a simpler,\nfaster, more understandable final model without having to record that many attributes of each patient.\n\n<p align=\"center\">\n <img src=\"assets/radviz.png\" width=50%/>\n <br>\n RadViz for numerical data\n</p>\nUsing our correlation data, we were able to depict a more graphical interpretation of what each factor means.\nThis construction was built with scikit's Yellowbrick and shows a standardized view of how\nthe factors most correlated with death/illness can be graphically distinguished from each other.\nFrom this graph, we see that entries with death/illness tend towards having higher levels of D-Dimer,\nwhich wasn't something that could be guessed without medical expertise.\n\n#### t-SNE & K-Means\n<p align=\"center\">\n <img src=\"assets/t-sne_kmeans.png\" width=50%/>\n <br>\n Data, t-SNE projected and clustered using k-means. \n Shapes represent the two clusters. \n Blue are those who died or went to the ICU, while orange did not.\n</p>\nWe were correct in our assumption that the data would naturally be clustered using 2 clusters, as evidenced by the visible clustering of the positions of datapoints. Unfortunately, these clusters were not effective at filtering those who died or went to the ICU, as shown by the relatively uniform distribution of color in both clusters. We concluded that this must be a result of one of the following: \n\n+ t-SNE projected the data in such a way that it created \"phantom\" clusters, which is a common problem when the parameters of the algorithm are off\n+ The clusters are a result of a different latent categorization instead of being related to death or going to ICU.\n\n### **Supervised Results**\n****************************\n\n### Chi-Squared Feature Selection\n<p align=\"center\">\n <img src=\"assets/chiSquareStatisticsBarGraph.png\" width=50%/>\n <br>\n Chi-squared statistics by feature. Red bars had p-value < 0.05, indicating dependence between death and the feature.\n</p>\nUsing a significance level of p = 0.05, we determined that there are 9 features that are dependent with the boolean ‘died’ variable: CKD(Chronic Kidney Disease), ARDS (Acute Respiratory Distress Syndrome), Myalgias, IDDM (Insulin Dependent Diabetes Mellitus, Fever, NV (Nausea-vomiting), Troponin.0612, Afib (Atrial Fibrillation) and CAD (Coronary Artery Disease). Due to the Admitxflu, ActiveLungCancer, and Marijuana columns having nan values, we decided we cannot interpret those and stuck with the null hypothesis that these three columns are independent of the ‘died’ column. These results are helpful in that they select 9 features out of 64 that we can say will determine whether a patient will die as these 9 features have a p-value below 0.05, meaning we reject the null hypothesis in favor of the alternate hypothesis that ‘died’ is dependent on those 9 categories. \n\n### Classification Metrics\nFor this project we used a variety of metrics to find the ideal hyperparameters for our model. We primarily measured 4 different metrics for performance: the F1 score, precision, recall, and accuracy. It is important to note that for our classification problem, there were only 2 classes to choose from. Every patient was either in Death_ICU or not in Death_ICU. 
This means that randomly picking between the two categories could yield an accuracy around 50%. In addition, our dataset had 116 total rows with 48 of them being in the Death_ICU category and 68 not being in Death_ICU. This also means that a classifier that picked every data point as false would be able to get around 58.6% accuracy.\n\n#### Precision\nPrecision is a measure of how correctly the returned positives were predicted. In order to calculate the precision, we divide the number of correctly predicted positives by the total number of predicted positives. Maximizing the precision will decrease the number of false positives (actual negatives that were classified as positive) the classifier returns. \n\n#### Recall\nRecall is a measure of how much of the actual positives were returned as positive. To calculate the recall we divide the number of correctly predicted positives by the total number of positives in the dataset. Maximizing the recall will decrease the number of false negatives (actual positives that were classified as negative) the classifier returns.\n\n#### Accuracy\nAccuracy is a measure of the correctness of the classifier. In order to calculate the accuracy, we add the number of correctly predicted positives with the number of correctly predicted negatives and divide that by the total number of rows in the dataset.\nSince the classes of the dataset were relatively balanced, accuracy might be a more useful metric than in other datasets with very low occurrences of certain classes. In our case, since around 58.6% of entries were false, a trivial solution would yield around that accuracy as a metric.\n\n\n#### F1-Score\nThe F1 score is another way to measure the accuracy of a classifier. F1 combines both precision and recall into one metric. We calculate the F1 score by multiplying our precision and recall together and then multiplying by 2 and then dividing by the sum of our precision and recall. Maximizing the F1 score will reduce both the number of false positives and the number of false negatives the classifier returns. However one downside of the F-score is that it does not take into account the number of True Negatives (actual negatives that were correctly returned as negative). \n\nIn a real-world setting, where we are attempting to measure the potential severity of a patient with COVID-19, the number of true negatives is not as important as the number of false positives or the number of false negatives. For example, telling patients who will never need to go to the ICU that they will soon be in the ICU (False Positive) and keeping them in the hospital for continuous monitoring will take away much needed hospital beds from patients who desperately need them. On the other hand, telling patients that will fall severely ill and will soon need to go to the ICU that they are fine (False Negative) and sending them home early will also divert hospital resources from those who need it most. 
For this reason, we decided to focus primarily on maximizing the F1-score which accounts for both False Positives and False Negatives.\n\n### Naive Bayes Classifiers\n#### Complement Naive Bayes\nThe Complement Naive Bayes method was designed to minimize the effects of class imbalance by comparing the probabilities\nof *not* belonging to certain classes rather than comparing probabilities of belonging to one certain class.\nAfter utilizing the Complement Naive Bayes classifier technique for the numerical data, we were able to maximize the accuracy of the model using a test_size value of 0.2. We were able to achieve an f-measure of 0.800 after tuning the hyperparameters. \nThe Complement Naive Bayes was not chosen as our final classifier since our data was already relatively balanced. \n\n#### Multinomial Naive Bayes\nWe also tried utilizing the Multinomial Naive Bayes classifier for the numerical data. Unfortunately, we did not have as much success in creating an effective model using this classifier technique. We were only able to reach an f-measure of 0.737 using this method.\nBecause Multinomial Naive Bayes was designed to work with large counts of data (such as word occurrences in text),\nthis method was not as suitable for the one-hot encoded data present in our dataset.\n\n#### Bernoulli Naive Bayes\nAfter utilizing the Bernoulli Naive Bayes classifier technique for the categorical data, we were able to maximize the effectiveness of the model when making the test_size parameter equal to 0.2. After tuning the hyperparameters, we were able to produce a classifier model using categorical features that had an f-measure of 0.78. \nWe also tried Bernoulli Naive Bayes using just the features that the chi-square feature selection method found to be significant (having a p-value less than 0.05). We were able to increase the f-measure by over 15%, from 0.78 to 0.95, using the chi-square pruning technique. This was the highest f-measure produced of all the Naive Bayes techniques. 
\n<p align=\"center\">\n <img src=\"assets/bernoulli_nb_best_results.png\" width=50%/>\n <br>\n Results from best Bernoulli Naive Bayes model\n</p>\nBy using Bernoulli Naive Bayes, we were able to focus on the categorical data (which was treated as Bernoulli distribution).\nBecause of the nature of categorical data (especially one-hot encoded data), the resultant classifier is able\nto accept data that doesn't rely on the precision required from numerical data.\nAlso, because of feature selection, we were able to reduce the amount of needed attributes such that\nless data had to be collected in order to yield a meaningful result.\nBy using the `predict_proba()` method, the classifier can extract a posterior probability, which\ncan be used to decide on the severity of certain patients.\n\n#### Hyperparameter Tuning\nIn any machine learning model, there are parameters of the model that programmers can use in order to\nadjust the performance and efficiency of models.\nThese parameters act as a design choice and allow modelers to push models towards optimal architectures.\nFamiliar examples of these include the number of degrees to use in a linear model, the maximum depth of a decision tree,\nand the number of layers in a neural network.\nBecause the Naive Bayes model is so simple, the `sklearn` library only provides one parameter: `alpha`.\nThis parameter denotes the amount of [additive smoothing](https://en.wikipedia.org/wiki/Additive_smoothing)\npresent in the model,\nwhere additive smoothing is essentially a small value added to probability calculations such that\nvalues of zero for certain probabilities don't turn an entire posterior probability into zero.\n`sklearn` provides the **`GridSearchCV`** object as a way to exhaustively try all values of certain parameters\n(defined via a python `dict`), and score each parameter value based on cross-validation metrics:\n\n<p align=\"center\">\n <img src=\"assets/hyperparameters.png\" width=50%/>\n <br>\n Various scoring metrics graphed with respect to values of <code>alpha</code>. 
(click for more) \n</p>\n\nOwing to the lightweight nature of our model, this search was able to be run on 100 different values of alpha for four\ndifferent scoring metrics.\nAs a result of this search, we determined that the optimal value for `alpha` was `0.6` in our case,\nwith each metric giving generally similar results.\nAn interesting result from this exploration, however, was that higher values of alpha seemed to yield better results\nsolely in the case of precision (although this increase in precision was smaller in comparison to the\ndecrease in other metrics had we used this higher value of `alpha`).\nWhat's also interesting to note is that there are many \"plateaus\" in the hyperparameter scores,\nmeaning that certain value ranges of `alpha` yielded the same score.\nThis result is hypothesized to be due to the small size of our dataset.\n\nFinally, it's important to note that while hyperparameter tuning is powerful for optimizing a model,\nother overarching changes to the model might bring even greater improvements to the model's performance.\nIn fact, while the more general cross-validation testing scheme of our `GridSearchCV` method yielded results\nthat showed changes in the mean scores across various folds,\nother less-robust metrics (like using one specific train-test split) often yielded\nno change in performance from the default `alpha` value.\nFortunately, another overarching change (feature selection) improved our model's performance significantly,\nwhich will be discussed later.\n\n#### Feature Selection:\nUsing the results of the Chi-squared tests allowed the model to increase the F1 score from 0.78 to 0.95,\nwhich provides an extremely significant increase in performances.\nBy limiting the features to ones that were most significant with respect to `Death_ICU`,\nthe model was able to eliminate noise in the dataset, increase performance, and be generally more lightweight.\n\n## **Discussion**\nPredicting risk based on demographic information, medical background, and behavior can provide extremely valuable insight\ninto how the COVID-19 pandemic should best be handled. At the institutional level, hospitals can use our risk predictions\nto determine how to most efficiently allocate the limited resources in order to minimize deaths and complications.\nHospitals will be able to make well-informed, data-driven decisions for how to treat patients and what to be the most wary of.\nMoreover, risk prediction and a strong understanding of what factors contribute the most to COVID-19 severity can also be informative for the individual.\nAn individual may engage in more extensive prevention behaviors if they are able to predict the severity of their illness or the illnesses of their loved ones.\nAdditionally, as a society, we can identify those individuals who are most at risk, and take extra precautions to protect them from the virus.\nWe hope that this increase in information will drive progress toward ending the pandemic.\n\n## **References**\n- “CDC COVID Data Tracker,” Centers for Disease Control and Prevention, 2020. [Online]. Available: https://covid.cdc.gov/covid-data-tracker/?CDC_AA_refVal=https%3A%2F%2Fwww.cdc.gov%2Fcoronavirus%2F2019-ncov%2Fcases-updates%2Fcases-in-us.html. [Accessed: 02-Oct-2020]. \n- J. Turcotte, “Replication Data for: Risk Factors for Severe Illness in Hospitalized Covid-19 Patients at a Regional Hospital,” Harvard Dataverse, 22-Jul-2020. [Online]. Available: https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi%3A10.7910%2FDVN%2FN2WZNK. 
[Accessed: 02-Oct-2020]. \n- “learn: machine learning in Python - scikit-learn 0.16.1 documentation,” scikit. [Online]. Available: https://scikit-learn.org/. [Accessed: 07-Dec-2020]. \n" } ]
2
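The README embedded in the record above describes projecting the numeric features with t-SNE and clustering the projection with k-means (two clusters), then checking whether the clusters separate the death/ICU patients from the rest. A minimal sketch of that procedure with scikit-learn, assuming a random placeholder feature matrix `X` and label vector `y` rather than the project's actual data, with an illustrative perplexity value:

```python
# Sketch only: X and y are random stand-ins for the project's cleaned data.
import numpy as np
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
X = rng.normal(size=(116, 20))    # placeholder numeric feature matrix
y = rng.integers(0, 2, size=116)  # placeholder death/ICU labels

# Project to 2-D; perplexity is one of the parameters that, when poorly
# chosen, can produce the "phantom" clusters the README warns about.
emb = TSNE(n_components=2, perplexity=30, random_state=0).fit_transform(X)

# Cluster the projection into the two groups the README expects.
clusters = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(emb)

# Compare cluster membership with the actual labels.
for c in (0, 1):
    print(f"cluster {c}: {np.mean(y[clusters == c]):.2f} fraction died/ICU")
```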
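The chi-squared feature selection step described in that README (keep the categorical features whose test against the `died` outcome gives p < 0.05) maps directly onto scikit-learn's `chi2` scorer. A minimal sketch, assuming a 0/1-encoded DataFrame `X_cat` and a boolean `died` series as stand-ins for the project's actual columns:

```python
# Sketch only: `X_cat` is a DataFrame of 0/1 categorical features and `died`
# the boolean outcome column -- illustrative names, not the real dataset.
import pandas as pd
from sklearn.feature_selection import chi2

def significant_features(X_cat: pd.DataFrame, died: pd.Series, alpha: float = 0.05) -> pd.DataFrame:
    """Return features whose chi-squared p-value against `died` is below alpha."""
    stats, pvals = chi2(X_cat, died)
    table = pd.DataFrame({"feature": X_cat.columns, "chi2": stats, "p": pvals})
    return table[table["p"] < alpha].sort_values("chi2", ascending=False)
```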
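The precision, recall, accuracy, and F1 definitions spelled out in that README reduce to a few lines of arithmetic; the confusion-matrix counts below are invented purely for illustration:

```python
# Worked example of the metric definitions in the README; counts are made up.
tp, fp, fn, tn = 20, 5, 8, 35   # hypothetical true/false positives and negatives

precision = tp / (tp + fp)                   # correct positives / predicted positives
recall = tp / (tp + fn)                      # correct positives / actual positives
accuracy = (tp + tn) / (tp + fp + fn + tn)   # all correct / all rows
f1 = 2 * precision * recall / (precision + recall)

print(f"precision={precision:.3f} recall={recall:.3f} "
      f"accuracy={accuracy:.3f} f1={f1:.3f}")
```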
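Likewise, the Bernoulli Naive Bayes model, the 0.2 test split, and the `GridSearchCV` sweep over the additive-smoothing parameter `alpha` scored by F1 that the README reports can be sketched as follows; the data, the number of chi-squared-selected features, and the alpha grid are placeholders rather than the project's actual setup:

```python
# Sketch of the modelling steps the README describes: Bernoulli NB on the
# selected categorical features, alpha tuned with GridSearchCV, scored by F1.
# X_sel and y are random placeholders for the project's data.
import numpy as np
from sklearn.model_selection import GridSearchCV, train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.metrics import f1_score

rng = np.random.default_rng(0)
X_sel = rng.integers(0, 2, size=(116, 9))  # placeholder: 9 selected 0/1 features
y = rng.integers(0, 2, size=116)           # placeholder Death_ICU labels

X_tr, X_te, y_tr, y_te = train_test_split(X_sel, y, test_size=0.2, random_state=0)

# Exhaustive search over candidate smoothing values, scored by cross-validated F1.
grid = GridSearchCV(
    BernoulliNB(),
    param_grid={"alpha": np.linspace(0.01, 1.0, 100)},
    scoring="f1",
    cv=5,
)
grid.fit(X_tr, y_tr)

best = grid.best_estimator_
print("best alpha:", grid.best_params_["alpha"])
print("test F1:", f1_score(y_te, best.predict(X_te)))
# predict_proba() exposes the posterior probabilities the README mentions
# as a way to rank patient severity.
print(best.predict_proba(X_te[:3]))
```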
Sherhan187/Python-assignment-3
https://github.com/Sherhan187/Python-assignment-3
1506750fd13c238b3263b451864cc168a6a92f67
2572a39843840e0ae5bf5e476de3aa10914451c7
8da9096d37c829e57cbba21518e51622b4f6fa45
refs/heads/main
2023-02-17T03:28:49.547149
2021-01-17T12:07:07
2021-01-17T12:07:07
330,376,683
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3665480315685272, "alphanum_fraction": 0.40658363699913025, "avg_line_length": 15.514705657958984, "blob_id": "f66066a0d044516a3d0673538fcc313ea66fd216", "content_id": "c7b1faac6bbcf86a06def15726a85d0301c13afb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 46, "num_lines": 68, "path": "/main.py", "repo_name": "Sherhan187/Python-assignment-3", "src_encoding": "UTF-8", "text": "# Python-assignment-3\n\n#1)\nnum = int(input(\"enter num\"))\nx = 1\nwhile x**2 <= num:\n    print(x**2, end = \" \")\n    x += 1\n\n\n#2)\nx = input()\ny = 0\nfor i in range(len(x)):\n    if x[i].isdigit() == True:\n        coun = 0\n        for j in range(i + 1, len(x)):\n            if x[j] == '!':\n                coun += 1\n            if coun > 2:\n                break\n            if coun == 2 and x[j].isdigit() == True:\n                a = int(x[i]); b = int(x[j])\n                if a + b == 6:\n                    y = 1\n                    break;\n    if y == 1:\n        break\nif y != 0:\n    print(\"True\")\nelse:\n    print(\"False\")\n\n\n#3)\nx = [int(y) for y in input().split()]\ncoun= 0\nfor i in range(len(x)):\n    if x[i] == 0:\n        break;\n    if x[i] % 2 == 0:\n        coun += 1\nprint(coun)\n\n#4)\nx = int(input())\npow = 0\nwhile 2**(pow + 1) <= x:\n    pow += 1\nprint(pow, 2**pow)\n\n\n#5) \ndef maxre(num_list):\n    x, ctr = 0, 1\n    \n    for i in range(1, len(num_list)):\n        if num_list[x] == num_list[i]:\n            ctr += 1\n        else:\n            ctr -= 1\n        if ctr == 0:\n            x = i\n            ctr = 1\n    \n    return num_list[x]\n\nprint(maxre([1,2,8,6,1,0,2,1]))\n\n" } ]
1
pratapvardhan/uberStore
https://github.com/pratapvardhan/uberStore
08ca9bea382c3be7eb1e3b46eb3fec65ee475afc
326d7ec8f4b7215e16d1054491b7e19ec796cf11
d1fd1795c3237a091267c1f76da72809fd2f8629
refs/heads/master
2020-03-28T01:11:09.721991
2015-05-29T06:51:04
2015-05-29T06:51:04
24,133,854
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7099999785423279, "alphanum_fraction": 0.7099999785423279, "avg_line_length": 24, "blob_id": "2b0b6dae7c952fa7561588eea1d0f747724740d0", "content_id": "3eb40d0f813ba686a008b0dcf4b318f3355acb59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 100, "license_type": "permissive", "max_line_length": 78, "num_lines": 4, "path": "/README.md", "repo_name": "pratapvardhan/uberStore", "src_encoding": "UTF-8", "text": "uberStore\n=========\n\nUtility scripts - bash, shell, python, aliases, settings and other misc things\n" }, { "alpha_fraction": 0.6467971801757812, "alphanum_fraction": 0.6622182726860046, "avg_line_length": 24.74045753479004, "blob_id": "33cb30c728d41ab049ee43e8d97e93c29a842025", "content_id": "435a3b1c47b5b1fbb19b981b90ad2f3ed85b43b9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3372, "license_type": "permissive", "max_line_length": 160, "num_lines": 131, "path": "/.bashrc", "repo_name": "pratapvardhan/uberStore", "src_encoding": "UTF-8", "text": "set BWORK=~/bashwork\n\nalias ..='cd ..'\nalias ...='cd ../..'\nalias ....='cd ../../..'\nalias .....='cd ../../../..'\nalias cd..='cd ..'\n# clear console\nalias clc='clear'\nalias cls='clear'\n# Colorize the ls output\nalias l='ls -ahoF --color=auto'\n# Show only hidden files\nalias l.='ls -d .* --color=auto'\nalias dir='ls -gGAh --color=auto --file-type'\n# delete directory\nalias rmdir='rm -r'\n# Re-Load bashrc file\nalias bashrc='source ~/.bashrc'\nalias bashload='source ~/.bashrc'\n# copy the working directory to clipboard\nalias cwd='pwd | clip'\n# Get your current public IP\nalias ip='curl icanhazip.com'\n\n: '\nAPPS\n'\n# Anaconda\nalias python='/c/anaconda/python.exe'\nalias ipython='/c/anaconda/scripts/ipython.exe'\nalias uncompyle2=\"/c/Anaconda/Scripts/uncompyle2\"\nalias flake8='/c/anaconda/scripts/flake8.exe'\nalias ipy='ipython notebook'\nalias python3='/c/Anaconda/envs/py34/python.exe'\nalias pd0.15.2='/c/Anaconda/envs/pd0.15.2/python.exe'\npep8 () { python \"/c/anaconda/scripts/pep8-script.py\" \"$1\"; }\npyflakes () { python \"/c/anaconda/scripts/pyflakes-script.py\" \"$1\"; }\n\nalias chrome='/c/progra~2/Google/Chrome/Application/chrome.exe'\nalias excel='/c/Progra~2/Microsoft Office/Office15/excel.exe'\nalias py='/c/Progra~2/PyScripter/PyScripter.exe'\nalias subl='\"/c/Program Files/Sublime Text 3/sublime_text.exe\"'\nalias 7-z='\"/c/Program Files/7-Zip/7z.exe\"'\nalias ffmpeg='/c/ffmpeg/bin/ffmpeg.exe'\nalias ffprobe='/c/ffmpeg/bin/ffprobe.exe'\nalias julia='cmd //c /c/Julia-0.3.3/bin/julia.exe'\nalias julianew='start /c/Julia-0.3.3/bin/julia.exe'\n\nalias gramex='START cmd //c \"python D:/vis/gramex.py index.html\"'\nalias dirlearngram=\"cd /d/z-learn-gramener-com/\"\n\nalias songs='explorer \"D:\\EE\\Pratap Musik\"'\nalias watch='explorer \"D:\\EE\"'\nalias temp=\"/d/temp\"\nalias oe=\"explorer\"\n\nalias phantomjs=\"/c/apps/phantomjs\"\nalias wget=\"/c/cygwin64/bin/wget.exe\"\nalias pandoc=\"/d/apps/pandoc.exe\"\n#alias sphinx-build='/c/anaconda/scripts/sphinx-build.bat'\ngoogle () { chrome \"google.com/#q=\"\"$1\"; }\nalias edit=\"subl\"\nalias open=\"start\"\nopenlearn () { dirlearngram; gramex; }\n\n: '\nUtilities\n'\nalias o='python -ux ~/bashwork/o.cmd'\n\nzip () { 7-z a \"$1\".zip \"$1\" ; }\n7z () { 7-z a \"$1\".7z \"$1\" ; }\n\n# html template\ned () {\n touch $1\n cat ~/bashwork/temp.html >>$1\n subl $1\n}\ntoday () {\n echo \"Today's date is:\"\n 
date +\"%A, %B %-d, %Y\"\n}\nargs () {\n echo $@\n echo $0\n echo $1\n echo $2\n}\n\n: '\nSEARCH\n# ff: to find a file under the current directory\n# ffs: to find a file whose name starts with a given string\n# ffe: to find a file whose name ends with a given string\n'\nff () { find . -name \"$@\" ; }\nffs () { find . -name \"$@\"'*' ; }\nffe () { find . -name '*'\"$@\" ; }\n\n: '\nGIT Aliases\n'\nalias gs='git status'\nalias gc='git commit'\nalias ga='git add'\nalias gdiff='git diff'\nalias gull='git pull'\nalias gf='git fetch upstream'\nalias gm='git merge upstream/master'\nalias gp='git push'\nalias gb='git branch'\nalias gco='git checkout'\nalias gcl='git clone'\nalias glog=\"git log --all --graph --pretty=format:'%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit --date=relative\"\nalias g-logpg='git log --pretty=format:\"%h %s\" --graph'\ngitfmp () { gf; gm; gp; }\n\n\n: '\nls -lS /path/to/folder/\n Capital S.\n This will sort files in size.\nls -t\n By Date\nls -tr\n for reverse, most recent at bottom\n'\n\nalias sshec2gram='ssh [email protected]'\n" }, { "alpha_fraction": 0.5534918308258057, "alphanum_fraction": 0.5646359324455261, "avg_line_length": 31.829267501831055, "blob_id": "8128299637ffe423bd083db2894eb3c119671a8b", "content_id": "eb04022aafbd7d9001080ccf9aa6c445d8219092", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1346, "license_type": "permissive", "max_line_length": 80, "num_lines": 41, "path": "/randfiles.py", "repo_name": "pratapvardhan/uberStore", "src_encoding": "UTF-8", "text": "\"\"\"\nCreate Random files\n\"\"\"\nimport os\nimport random\nimport string\n\ndef randfiles(location, indirs=5, maxfiles=5, filelen=5, ext=['.bin', '.txt']):\n \"\"\"\n Create files recursively inside a directory with random filenames.\n\n Parameters\n ----------\n location : str, directory where random folder with files are to created\n indirs : int, number of subdirectories\n default=5\n maxfiles : int, maximum files under each subdirectory\n default=5\n filelen : int, length of each file\n default=5\n ext : list, extensions for file\n\n Example\n -------\n PATH = \"D:/temp/data/files/\"\n EXT = ['.txt', '.doc', '.png', '.pdf']\n randfiles(PATH, indirs=15, maxfiles=5, filelen=6, ext=EXT)\n \"\"\"\n rnd = random.randint\n extno = len(ext)-1\n for pid in range(1, indirs+1):\n for idf in range(rnd(1, maxfiles)):\n base = ''.join(random.choice(string.ascii_lowercase + string.digits)\n for x in range(filelen)) + str(idf)\n path = os.path.join(location + str(pid), base + ext[rnd(0, extno)])\n if not os.path.exists(path):\n folder = os.path.dirname(path)\n if not os.path.exists(folder):\n os.makedirs(folder)\n print path\n open(path, 'a').close()\n" }, { "alpha_fraction": 0.5211793780326843, "alphanum_fraction": 0.5340531468391418, "avg_line_length": 31.106666564941406, "blob_id": "ae81054e2f6aa40419dfb5f5c8924dde38c97358", "content_id": "5eee810f1d7e248f540b02b2f39679ab329e6855", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2408, "license_type": "permissive", "max_line_length": 120, "num_lines": 75, "path": "/o.py", "repo_name": "pratapvardhan/uberStore", "src_encoding": "UTF-8", "text": "'''\nFileOpener:\nUsage: python o.py\n'''\nimport os, sys, re\n\npath, file = os.path.split(__file__)\n\nconfig_file = os.path.join(path, 'o.index')\nif not os.path.exists(config_file):\n open(config_file, 'w').close()\nfile_index = 
open(config_file).readlines()\n\nused_file = os.path.join(path, 'o.shortcuts')\nif not os.path.exists(used_file):\n open(used_file, 'w').close()\nused_index = open(used_file).readlines()\n\nroots = (\n ('', 'D:\\\\2seeVids'),\n ('', 'D:\\\\Ebooks'),\n ('', 'D:\\\\EdVideos'),\n ('', 'E:\\\\ET'),\n ('', 'E:\\\\TV'),\n ('', 'E:\\\\Pratap Musik')\n)\n\ndef create_index():\n print 'Creating Index for'\n for root in roots:\n print root[1]\n for path, subdirs, files in os.walk(root[1]):\n for name in files:\n yield os.path.join(path, name) + '\\n'\n print 'Index completed'\n return\n\ndef filter_index(words, any=0):\n phrase = ' '.join(words).lower()\n flag_cache = 1\n if any == 1:\n result = [i.split('\\t')[2] for i in used_index if (i.split('\\t')[0] == str(any) and phrase == i.split('\\t')[1])]\n if result == []:\n result = [i for i in file_index if all(x in i.lower() for x in words)]\n flag_cache = 0\n else:\n result = [i.split('\\t')[2] for i in used_index if (i.split('\\t')[0] == str(any) and phrase in i.lower())]\n if result == []:\n result = [i for i in file_index if phrase in i.lower()]\n flag_cache = 0\n if len(result) == 1:\n print 'Opening %s' % result[0][:-1]\n print 'for: \" %s \"' % phrase\n if flag_cache == 0:\n open(used_file, 'a').write(str(any)+'\\t'+phrase+'\\t'+result[0][:-1]+'\\n')\n os.startfile(result[0][:-1])\n else:\n for i in result:\n print i[:-1]\n print '%d files exist' % len(result)\n\nif len(sys.argv) > 1 and sys.argv[1] == '-index':\n open(config_file, 'w').writelines(create_index())\n open(used_index, 'w').writelines('')\nelif len(sys.argv) > 1 and sys.argv[1] == '.':\n filter_index(sys.argv[2:])\nelif len(sys.argv) > 1 and sys.argv[1] == ',':\n filter_index(sys.argv[2:], any=1)\nelse:\n print 'Using a utility file'\n print 'Usage:'\n print 'To recreate index : python o.py -index'\n print 'To open a file '\n print ' Phrase : python o.py . steve jobs'\n print ' Anywhere : python o.py , steve jobs books'\n" } ]
4
tom-itl/Machine_Learning
https://github.com/tom-itl/Machine_Learning
1d422eeea378b2b5902ba60e2982b1f2ae45f783
b8a7e6066533ea4766f4e92be8a3562496dbdf26
43b866ade6a8f661c3916746850117b157cff5f7
refs/heads/master
2022-11-24T18:58:57.174335
2020-07-21T23:05:30
2020-07-21T23:05:30
275,685,820
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5934796929359436, "alphanum_fraction": 0.6061210632324219, "avg_line_length": 31.919708251953125, "blob_id": "7c2041ea90d153b60a05296673320844101f3fee", "content_id": "ad65d039e9083414f5b70fb332653579a0b5553f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4509, "license_type": "no_license", "max_line_length": 140, "num_lines": 137, "path": "/p2_image_classifier/predict.py", "repo_name": "tom-itl/Machine_Learning", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport numpy as np\nfrom PIL import Image\n\n# Disable CUDA messages:\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}\n\nimport tensorflow as tf\nimport tensorflow_hub as hub\nimport json\nimport sys\nimport getopt\nimport signal\n\n\ndef predict(image_path, model, top_k):\n ''' Predicts the flower category for the given image_path,\n using the given machine learning model, and returns the\n top_k probabilities and classes.\n \n Note that the classes are adjusted to account for the \n 1-based vs 0-based indexing in the class names JSON file.\n '''\n image = process_image(np.asarray(Image.open(image_path)))\n image = np.expand_dims(image, axis=0)\n \n pred = model.predict(image)\n \n pred = pred[0].tolist()\n \n probs, classes = tf.math.top_k(pred, k=top_k)\n \n return probs, classes + 1 # Correct class index to 1-based used in class_names\n\ndef process_image(np_image):\n ''' Takes in an image that consists of numpy arrays,\n resizes to 224 x 224 pixels, and normalizes the \n color values to 0-1. \n \n Returns image as set of numpy arrays.\n '''\n image_size = 224\n tf_image = tf.convert_to_tensor(np_image)\n tf_image = tf.image.resize(np_image, [image_size, image_size])\n tf_image /= 255.0\n return tf_image.numpy()\n\n\ndef sigint_handler(signum, frame):\n ''' Catches interrupts such as Ctrl-C and exits nicely instead\n of throwing up an unnecessary stack trace. 
Unfortunately it\n does not catch a SIGINT event during imports.\n '''\n print('Caught interrupt; exiting nicely.')\n exit(0)\n \ndef print_help():\n print('Usage: python predict.py <image path> <model path>')\n print('\\nOptional arguments:')\n print('\\t--top_k <K>\\t\\t\\t\\tNumber of probabilities and classes to return')\n print('\\t--category_names <JSON name file path>\\tName of JSON file containing category to index mapping')\n print('\\t-p\\t\\t\\t\\t\\tPlot image and prediction results')\n\ndef main():\n signal.signal(signal.SIGINT, sigint_handler)\n \n # Make sure the input image path and model name are not missing.\n if len(sys.argv) < 3:\n print('Not enough arguments.\\n')\n print_help()\n exit(0)\n \n image_path = sys.argv[1]\n model_path = sys.argv[2]\n \n # Set default values for optional arguments\n top_k = 5\n cat_names_path = './label_map.json'\n plot_flag = False\n \n # Modify values for optional arguments if specified on command line\n opts, args = getopt.gnu_getopt(sys.argv[3:], 't:c:p', ['top_k=', 'category_names=', 'plot'])\n for opt, arg in opts:\n if opt in ('-t', '--top_k'):\n top_k = int(arg)\n elif opt in ('-c', '--category_names'):\n cat_names_path = arg\n elif opt in ('-p', '--plot'):\n plot_flag = True\n else:\n print('Unknown argument.')\n print_help()\n \n with open(cat_names_path, 'r') as f:\n class_names = json.load(f)\n\n # Load model and make predictions\n model = tf.keras.models.load_model(model_path, custom_objects={'KerasLayer':hub.KerasLayer})\n probs, classes = predict(image_path, model, top_k)\n \n print(f'\\nThe flower was predicted to be: {class_names[str(classes.numpy()[0])]}, with {round(probs.numpy()[0]*100, 2)} % probability.')\n \n categories = []\n for flower_class in classes.numpy():\n categories.append(class_names[str(flower_class)])\n \n print(f'\\nThe top {top_k} results:')\n print('===================')\n for i in range(len(probs.numpy())):\n # Some kludging because the rounded probability isn't working in formatted string\n prob = np.round(probs.numpy()[i], 4)\n print('Probability: ', end='')\n print(prob, end='')\n print(f',\\tCategory: {classes.numpy()[i]},\\tName: {categories[i].title()}')\n \n if plot_flag:\n # Only import if actually plotting\n import matplotlib.pyplot as plt\n \n \n \n fig, (ax1, ax2) = plt.subplots(figsize=(12, 6), ncols=2)\n ax1.set_xticks([])\n ax1.set_yticks([])\n ax1.imshow(Image.open(image_path))\n # ax1.set_title('Input Test Image')\n ax2.barh(categories, probs) #sorted(probs.numpy().tolist()))\n ax2.invert_yaxis()\n ax2.set_title('Class Probability')\n ax2.set_xlim([0.0, 1.0])\n plt.tight_layout()\n plt.show() \n \nif __name__ == '__main__':\n main()" }, { "alpha_fraction": 0.7763713002204895, "alphanum_fraction": 0.7848101258277893, "avg_line_length": 58.25, "blob_id": "07718947f93ed7bcaf4697c96d96cecca4ae8686", "content_id": "dee1ca424a31bc952f1730a02dc12732716b5822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 474, "license_type": "no_license", "max_line_length": 83, "num_lines": 8, "path": "/p2_image_classifier/check_all_images.sh", "repo_name": "tom-itl/Machine_Learning", "src_encoding": "UTF-8", "text": "printf 'First image - Wild Pansy'\npython predict.py ./test_images/wild_pansy.jpg flower_recog_model.h5\nprintf '\\n\\nSecond image - Orange Dahlia'\npython predict.py ./test_images/orange_dahlia.jpg flower_recog_model.h5\nprintf '\\n\\nThird image - Hard-Leaved Pocket Orchid'\npython predict.py 
./test_images/hard-leaved_pocket_orchid.jpg flower_recog_model.h5\nprintf '\\n\\nFourth image - Cautleya Spicata'\npython predict.py ./test_images/cautleya_spicata.jpg flower_recog_model.h5\n" } ]
2
gjones1911/DeepSolar_Code_Base
https://github.com/gjones1911/DeepSolar_Code_Base
7e02d5947b4bb99d15650f3a047ab029bb0a6def
b7a7e330af7323f85d20d2b4537f82633920e8e9
e3056f09f7a059ae0c9431b1eb301454dc9f8a88
refs/heads/master
2020-12-14T12:29:46.184411
2020-02-09T02:03:32
2020-02-09T02:03:32
234,743,423
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5380861759185791, "alphanum_fraction": 0.5807963013648987, "avg_line_length": 50.914085388183594, "blob_id": "85583e2d42aeb1c93b696197be74a57296fcf0f6", "content_id": "4321bd9a2773fcf57817330b54a4ed645a2385fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 82182, "license_type": "no_license", "max_line_length": 199, "num_lines": 1583, "path": "/DeepSolarModels.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "\"\"\" This is a collection of different variables sets and a few data structures to make them \"\"\"\n\"\"\" easier to load into a model, \"\"\"\nfrom statistics import mean\n\nimport pandas as pd\n#from _products.ML_Tools import load_tree_trunc_features\ndef load_tree_trunc_features(df=None, dffile=None, limit=.00, verbose=False):\n if df is None:\n df = pd.read_excel(dffile, usecols=['Variable', 'Imp_trunc'])\n\n df = df.loc[df['Imp_trunc'] >= limit, 'Variable']\n if verbose:\n print(list(df))\n return list(df)\n\ndef voted_list(vote_dic, thresh=2):\n rl = list()\n for f in vote_dic:\n if vote_dic[f] > 2:\n rl.append(f)\n return rl\n\ndef tally_var_votes(votes,):\n #from _products.utility_fnc import sort_dict\n import operator\n rd = {}\n for va in votes:\n for v in va:\n if v not in rd:\n rd[v] = 0\n rd[v] += 1\n\n return dict(sorted(rd.items(), key=operator.itemgetter(1), reverse=True))\n\npopdenonly = ['Adoption', 'population_density', 'heating_fuel_coal_coke_rate',\n 'education_bachelor', 'pop25_some_college_plus', 'travel_time_49_89_rate', 'education_master',\n 'pop_female', '',\n ]\n\n# ##############################################################\n# ##############################################################\n# ############TODO: Random Forest FS sets/simple sets #####################\n# ##############################################################\n# ##############################################################\n\ntallff = 0\n# 0.7333350960892336, Good is now:\nsimple_set4 = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'pop_over_65', 'hu_2000toafter_scld']\n\n# Score: 0.735002341556155, Good is now:\nsimple_set10 = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'incent_cnt_res_own',\n 'travel_time_49_89_rate', 'education_doctoral']\n\n# Score: 0.7352799456986788, Good is now:\nsimple_set9 = ['population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld', 'heating_fuel_none', 'hu_med_val_scld',\n 'age_55_64_rate']\n\n# Score: 0.7355576655819634, Good is now:\nsimple_set11 = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'hu_2000toafter_scld', 'pop_over_65',\n 'heating_fuel_none_rate', 'education_less_than_high_school']\n\n# 0.7358360798133626\nsimple_set3 = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education',\n 'hu_monthly_owner_costs_lessthan_1000dlrs', 'avg_electricity_retail_rate_scld', 'land_area_scld',\n 'hu_vintage_1939toearlier_scld']\n\n# Score: 0.7361141471600564, Good is now:\nsimple_set6 = ['population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'age_65_74_rate',\n 'total_area', 'heating_fuel_gas_scld', 'hu_2000toafter_scld',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs', 'incentive_count_nonresidential']\n\n# 0.7363917513990309\nsimple_set = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education',\n 
'hu_monthly_owner_costs_lessthan_1000dlrs_scld', 'avg_electricity_retail_rate_scld', 'heating_fuel_other']\n\n# Score: 0.7376407100697606\nsimple_set7 =['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'pop_over_65',\n 'hu_2000toafter_scld', 'incent_cnt_res_own', 'travel_time_10_19_rate', 'employ_rate_scld',\n 'education_bachelor_rate', 'pop_med_age_scld', 'hh_med_income']\n\n# 0.7377803512494823\nsimple_set2 = ['population_density_scld', 'E_DAYPOP', 'number_of_years_of_education_scld', 'total_area_scld',\n 'age_65_74_rate', 'poverty_family_below_poverty_level_rate', 'heating_fuel_solar_scld',\n 'travel_time_10_19_rate', 'occupation_finance_rate', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'heating_fuel_coal_coke_scld', 'heating_fuel_none']\n\n# e: 0.737853762194533, 0.21913339615387883\nsimple_set8 = ['population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education',\n 'avg_electricity_retail_rate_scld', 'travel_time_10_19_rate']\n\n# Score: 0.737853762194533, Good is now:\nsimple_set12 = ['population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education',\n 'avg_electricity_retail_rate_scld', 'travel_time_10_19_rate']\n\n# Score: 0.7384736961325862, Good is now: TODO: take out incentive residential\nsimple_set5 = ['population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education', 'incentive_count_residential',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld', 'avg_electricity_retail_rate_scld',\n 'incentive_count_nonresidential', 'fam_med_income_scld', 'travel_time_10_19_rate', 'age_75_84_rate']\n\n# Score: 0.739968571305989, Good is now:\nsimple_set13 = ['population_density_scld', 'E_DAYPOP', 'number_of_years_of_education_scld', 'heating_fuel_none',\n 'total_area_scld', 'poverty_family_below_poverty_level_rate', 'occupation_finance_rate',\n 'travel_time_10_19_rate', 'heating_fuel_solar_scld']\n\nsimple_set_avg = mean([0.7363917513990309, 0.7377803512494823, 0.7358360798133626, 0.7333350960892336,\n 0.7384736961325862,0.7361141471600564, 0.7376407100697606, 0.737853762194533,\n 0.7352799456986788, 0.7355576655819634, 0.737853762194533])\n\n# ##############################################################\n# ##############################################################\n# ############TODO: Logistic Regression FS sets ###############\n# ##############################################################\n# ##############################################################\nR2set = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'heating_fuel_coal_coke_scld',\n 'population_density_scld', 'hu_med_val_scld', 'pop_total_scld', 'hu_vintage_1939toearlier_scld',\n 'education_bachelor_rate', 'incent_cnt_res_own', 'pop_under_18_scld', 'avg_inc_ebill_dlrs_scld',\n 'housing_unit_median_gross_rent_scld', 'pop_over_65_scld', 'education_master_scld', 'age_median_scld',\n 'heating_fuel_gas_scld', 'net_metering_hu_own', 'average_household_income_scld', 'avg_monthly_bill_dlrs_scld',\n 'E_AGE17_scld', 'travel_time_49_89_rate','hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'education_bachelor_scld', 'education_master_rate', 'age_25_44_rate_scld',\n 'education_doctoral_scld']\n\ngood_acc_lr = ['education_bachelor_scld', 'total_area_scld', 'pop_total_scld', 'population_density_scld',\n 'hu_vintage_1939toearlier_scld', 'land_area_scld', 'net_metering_hu_own', 'housing_unit_count_scld',\n 'own_popden_scld']\n# .7178637200736648\ngood_acc_lr2 = ['education_bachelor_scld', 'travel_time_average_scld', 
'heating_fuel_housing_unit_count_scld',\n 'hu_med_val_scld', 'heating_fuel_coal_coke_scld', 'avg_monthly_bill_dlrs_scld', 'pop_under_18_scld',\n 'education_master_scld', 'education_doctoral_scld']\n\n# good acc: 0.7436464088397791, R2 0.1561717889129457\ngood_acc_lr3 = ['Adoption', 'land_area_scld', 'education_bachelor_scld', 'travel_time_average_scld', 'hh_size_4_scld',\n 'education_master_rate', 'mod_sf_own_mwh_scld', 'number_of_years_of_education_scld',\n 'bachelor_or_above_rate', 'hu_own_scld', 'hu_vintage_2000to2009_scld', 'education_bachelor_rate']\n\n# good acc: 0.7152854511970534, R2 0.12101327282984775\ngood_acc_lr4 = ['education_bachelor_scld', 'total_area_scld', 'hu_vintage_1939toearlier_scld', 'heating_fuel_other_scld']\n\n# good acc: 0.7296500920810313, R2 0.18002674246895034\ngood_acc_lr5 = ['education_bachelor_scld', 'travel_time_average_scld', 'heating_fuel_housing_unit_count_scld',\n 'bachelor_or_above_rate', 'land_area_scld', 'pop_female_scld', 'population_density_scld',\n 'hh_size_2_scld', 'net_metering_hu_own', 'pop_over_65_scld', 'pop_total_scld',\n 'housing_unit_median_gross_rent_scld', 'med_inc_ebill_dlrs_scld', 'hu_own_scld']\n\n# good acc: 0.7340699815837938, R2 0.1713015497234639\ngood_acc_lr6 = ['land_area_scld', 'pop25_some_college_plus_scld', 'travel_time_average_scld',\n 'population_density_scld', 'heating_fuel_coal_coke_scld', 'fam_med_income_scld',\n 'heating_fuel_housing_unit_count_scld', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'pop_over_65_scld']\n\n# good acc: 0.7355432780847145, R2 0.18448226867693285\ngood_acc_lr7 = ['education_bachelor_scld', 'land_area_scld', 'pop_male_scld', 'hu_vintage_1939toearlier_scld',\n 'population_density_scld', 'education_doctoral_scld', 'travel_time_average_scld',\n 'bachelor_or_above_rate', 'pop_over_65_scld', 'heating_fuel_coal_coke_scld',\n 'avg_inc_ebill_dlrs_scld', 'E_AGE17_scld']\n\n\ngood_acc_Aavg = mean([.7178637200736648, 0.7436464088397791, .7152854511970534, 0.7296500920810313, 0.7340699815837938,\n 0.7355432780847145])\ngood_acc_Ravg = mean([0.1561717889129457, 0.12101327282984775, 0.18002674246895034, 0.1713015497234639, 0.18448226867693285])\ngood_acc_votes = [good_acc_lr, good_acc_lr2, good_acc_lr3, good_acc_lr4, good_acc_lr5, good_acc_lr6]\n\n# good Rsqr: 0.20098292514467608, Accuracy 0.7152854511970534\ngood_Rsqr4 = ['Adoption', 'pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'heating_fuel_coal_coke_scld',\n 'population_density_scld', 'hu_med_val_scld', 'pop_total_scld', 'hu_vintage_1939toearlier_scld',\n 'education_bachelor_rate', 'incent_cnt_res_own', 'pop_over_65_scld', 'med_inc_ebill_dlrs_scld',\n 'heating_fuel_gas_scld', 'pop_under_18_scld', 'net_metering_hu_own', 'education_bachelor_scld',\n 'education_master_scld', 'number_of_years_of_education_scld', 'pop25_no_high_school_scld',\n 'E_AGE17_scld', 'education_professional_school_scld', 'own_popden_scld', 'hh_size_2_scld',\n 'hu_vintage_1980to1999_scld', 'hu_vintage_1960to1970_scld', 'hu_2000toafter_pct_scld',\n 'median_household_income_scld', 'avg_monthly_bill_dlrs_scld', 'high_school_or_below_rate',\n 'education_population_scld', 'bachelor_or_above_rate', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'age_median_scld', 'travel_time_49_89_rate', 'heating_fuel_other_scld', 'hu_vintage_2010toafter_scld',\n 'housing_unit_median_gross_rent_scld', 'hu_own_scld', 'hh_size_4_scld', 'total_area_scld',\n 'pop_female_scld', 'poverty_family_count_scld', 'hh_size_3_scld', 'heating_fuel_electricity_scld',\n 
'hu_2000toafter', 'average_household_income_scld', 'avg_inc_ebill_dlrs_scld', 'education_master_rate',\n 'education_doctoral_scld', 'age_25_44_rate_scld', 'fam_med_income_scld', 'mod_sf_own_mwh_scld',\n 'housing_unit_count_scld', 'education_high_school_graduate_rate', 'heating_fuel_housing_unit_count_scld',\n 'hu_own_pct', 'household_count_scld']\n\n# Rsqaure = 0.20090205587065024\ngood_Rsqr = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld',\n 'heating_fuel_coal_coke_scld', 'population_density_scld', 'hu_med_val_scld', 'pop_total_scld',\n 'hu_vintage_1939toearlier_scld', 'education_bachelor_rate', 'incent_cnt_res_own', 'pop_under_18_scld',\n 'avg_inc_ebill_dlrs_scld', 'housing_unit_median_gross_rent_scld', 'pop_over_65_scld',\n 'education_master_scld', 'age_median_scld', 'heating_fuel_gas_scld', 'net_metering_hu_own',\n 'average_household_income_scld', 'avg_monthly_bill_dlrs_scld', 'E_AGE17_scld', 'travel_time_49_89_rate',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld', 'education_bachelor_scld', 'education_master_rate',\n 'age_25_44_rate_scld', 'own_popden_scld', 'hu_vintage_1960to1970_scld', 'hh_size_2_scld',\n 'hu_vintage_1980to1999_scld', 'hu_vintage_2000to2009_scld', 'housing_unit_count_scld', 'total_area_scld',\n 'education_high_school_graduate_rate', 'pop25_no_high_school_scld', 'education_population_scld',\n 'high_school_or_below_rate', 'education_professional_school_scld', 'hh_size_4_scld', 'mod_sf_own_mwh_scld',\n 'number_of_years_of_education_scld', 'bachelor_or_above_rate', 'heating_fuel_electricity_scld',\n 'median_household_income_scld', 'med_inc_ebill_dlrs_scld', 'poverty_family_count_scld', 'hh_size_3_scld',\n 'education_doctoral_scld', 'heating_fuel_housing_unit_count_scld', 'hu_2000toafter', 'pop_female_scld',\n 'hu_own_scld', 'hu_own_pct', 'heating_fuel_other_scld', 'hu_2000toafter_pct_scld', 'fam_med_income_scld',\n 'masters_or_above_rate']\n\n# good Rsqr: 0.20355407810942183\ngood_Rsqr2 = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'heating_fuel_coal_coke_scld',\n 'population_density_scld', 'hu_med_val_scld', 'pop_total_scld', 'education_bachelor_rate',\n 'pop_under_18_scld', 'hu_vintage_1939toearlier_scld', 'incent_cnt_res_own', 'pop_over_65_scld',\n 'education_high_school_graduate_rate', 'hu_2000toafter_pct_scld', 'hu_vintage_1960to1970_scld',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld', 'avg_inc_ebill_dlrs_scld',\n 'housing_unit_median_gross_rent_scld', 'hu_vintage_1980to1999_scld', 'hh_size_2_scld',\n 'net_metering_hu_own', 'education_professional_school_scld', 'E_AGE17_scld', 'education_bachelor_scld',\n 'high_school_or_below_rate', 'pop25_no_high_school_scld', 'education_population_scld', 'age_median_scld',\n 'heating_fuel_electricity_scld', 'pop_male_scld', 'travel_time_49_89_rate', 'mod_sf_own_mwh_scld',\n 'education_college_scld', 'number_of_years_of_education_scld', 'med_inc_ebill_dlrs_scld',\n 'fam_med_income_scld', 'masters_or_above_rate', 'education_master_rate', 'housing_unit_count_scld',\n 'hu_own_pct', 'hh_size_4_scld', 'avg_monthly_bill_dlrs_scld', 'average_household_income_scld',\n 'education_doctoral_scld', 'median_household_income_scld', 'poverty_family_count_scld', 'hu_2000toafter',\n 'own_popden_scld', 'hu_own_scld', 'hh_size_3_scld', 'total_area_scld', 'age_25_44_rate_scld',\n 'hu_vintage_2000to2009_scld', 'heating_fuel_other_scld', 'heating_fuel_gas_scld',\n 'heating_fuel_housing_unit_count_scld']\n\n# good Rsqr: 0.1929803665011074, Accuracy 
0.7252302025782689\ngood_Rsqr3 = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'heating_fuel_coal_coke_scld',\n 'population_density_scld', 'hu_med_val_scld', 'pop_total_scld', 'education_bachelor_rate',\n 'pop_over_65_scld', 'hu_vintage_1939toearlier_scld', 'incent_cnt_res_own', 'med_inc_ebill_dlrs_scld',\n 'heating_fuel_gas_scld', 'education_bachelor_scld', 'housing_unit_median_gross_rent_scld',\n 'pop_under_18_scld', 'E_AGE17_scld', 'hh_size_2_scld', 'median_household_income_scld',\n 'net_metering_hu_own', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld', 'hu_own_scld',\n 'heating_fuel_housing_unit_count_scld', 'housing_unit_count_scld', 'avg_monthly_bill_dlrs_scld',\n 'number_of_years_of_education_scld', 'pop25_no_high_school_scld', 'bachelor_or_above_rate',\n 'education_professional_school_scld', 'high_school_or_below_rate', 'education_population_scld',\n 'hu_own_pct', 'age_median_scld', 'hu_vintage_1960to1970_scld', 'hu_2000toafter_pct_scld',\n 'hu_vintage_1980to1999_scld', 'hu_vintage_2010toafter_scld', 'poverty_family_count_scld',\n 'hh_size_1_scld', 'age_25_44_rate_scld', 'education_doctoral_rate', 'education_high_school_graduate_rate',\n 'education_doctoral_scld', 'hu_2000toafter', 'heating_fuel_electricity_scld', 'travel_time_49_89_rate',\n 'hh_size_3_scld', 'fam_med_income_scld', 'pop_female_scld', 'mod_sf_own_mwh_scld', 'heating_fuel_other_scld',\n 'total_area_scld', 'own_popden_scld', 'education_master_scld', 'average_household_income_scld',\n 'avg_inc_ebill_dlrs_scld']\n\n# good Rsqr: 0.18201814141868777, Accuracy 0.7344383057090239\ngood_Rsqr5 = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'population_density_scld',\n 'heating_fuel_coal_coke_scld', 'hu_med_val_scld', 'pop_total_scld', 'hu_vintage_1939toearlier_scld',\n 'education_bachelor_rate', 'incent_cnt_res_own', 'pop_over_65_scld', 'pop_under_18_scld', 'E_AGE17_scld']\n\nLR_ac = ['education_bachelor_scld',\n'land_area_scld',\n'travel_time_average_scld',\n'total_area_scld',\n'pop_total_scld',\n'population_density_scld',\n'hu_vintage_1939toearlier_scld',\n'net_metering_hu_own',\n'heating_fuel_housing_unit_count_scld',]\n\n# good Rsqr: 0.18966013446910612, Accuracy 0.7233885819521179\ngood_Rsqr6 = ['pop25_some_college_plus_scld', 'travel_time_average_scld', 'land_area_scld', 'population_density_scld',\n 'heating_fuel_coal_coke_scld', 'hu_med_val_scld', 'pop_total_scld', 'education_bachelor_rate',\n 'hu_vintage_1939toearlier_scld', 'incent_cnt_res_own', 'pop_over_65_scld']\n\nRsqr_LRs = ['pop25_some_college_plus_scld',\n 'travel_time_average_scld',\n 'land_area_scld',\n 'heating_fuel_coal_coke_scld',\n 'population_density_scld',\n 'hu_med_val_scld',\n 'pop_total_scld',\n 'hu_vintage_1939toearlier_scld',\n 'education_bachelor_rate',\n 'incent_cnt_res_own',]\n\nRsqr_csn = ['pop25_some_college_plus_scld',\n 'travel_time_average_scld',\n 'land_area_scld',\n 'heating_fuel_coal_coke_scld',\n 'population_density_scld',\n 'hu_med_val_scld',\n 'pop_total_scld',\n 'hu_vintage_1939toearlier_scld',\n 'education_bachelor_rate',\n 'incent_cnt_res_own',\n ]\n\n\n\ngood_R2_Ravg = mean([0.20098292514467608, 0.20090205587065024, 0.20355407810942183, 0.1929803665011074,\n 0.18201814141868777, .18966013446910612])\ngood_R2_Aavg = mean([0.7252302025782689, 0.7152854511970534, 0.7344383057090239, 0.7233885819521179])\ngood_R2_votes = [good_Rsqr, good_Rsqr2, good_Rsqr3, good_Rsqr4, good_Rsqr5, good_Rsqr6]\n\n\nleast_model = ['population_density', 'education_bachelor', 
'education_high_school_graduate_rate', 'hu_own',\n 'masters_or_above_rate','education_population_scld','average_household_income',\n ]\n\n# Estrella R-Squared : 0.17988423234112094\ntop_ten_predictors = ['bachelor_or_above_rate', 'net_metering', 'incent_cnt_res_own', 'hu_2000toafter',\n 'education_bachelor_scld', 'education_master_scld', 'education_professional_school_scld',\n 'land_area_scld', 'total_area_scld', 'pop25_some_college_plus_scld']\n\n\"\"\" Complete model file paths\"\"\"\nmodel_dec_30 = '__Data/__Mixed_models/December/DeepSolar_Model_2019-12-30_mega.xlsx'\nmodel_dec_30_scld = '__Data/__Mixed_models/December/DS_1_12_scld.xlsx'\nmodel_feb_20 = r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__DeepSolar\\Feb\\Mixed\\DeepSolar_Model_Feb2020-02-02-01-16-39.xlsx'\n\n\nincentives_M = ['Adoption', 'incent_cnt_res_own', 'net_metering_hu_own', 'incentive_count_nonresidential',\n 'incentive_count_residential', 'incentive_nonresidential_state_level',\n 'incentive_residential_state_level', 'net_metering', 'property_tax_bin']\n\npolicy_N = incentives_M + ['Ren', 'dlrs_kwh', 'avg_electricity_retail_rate_scld', 'avg_electricity_retail_rate']\n\nmodel_files = {'model_dec_30':model_dec_30,\n '':''}\n\n#occu_trunc = load_tree_trunc_features(dffile='__Data/__Mixed_models/occu/occuold/RF_FI_occu_1_5_trunc.xlsx', limit=.08)\n#climate_trunc = load_tree_trunc_features(dffile='__Data/__Mixed_models/climate/climateold/RF_FI_climate_1_2_trunc.xlsx', limit=.09)\n\ndrops = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt', 'cust_cnt_scld', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax',\n 'solar_system_count', 'solar_panel_area_divided_by_area', 'solar_panel_area_per_capita',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n 'solar_system_count_residential',]\n\ndropsPVa = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt', 'cust_cnt_scld', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax', 'solar_panel_area_per_capita_scld', 'solar_system_count_residential_scld',\n 'solar_panel_area_divided_by_area_scld', 'solar_system_count_scld',\n 'solar_system_count', 'solar_panel_area_divided_by_area', 'solar_panel_area_per_capita',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n 'solar_system_count_residential',]\n\ndropsPVar = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt', 'cust_cnt_scld', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax', 'solar_panel_area_per_capita_scld', 'solar_system_count_residential_scld',\n 'solar_panel_area_divided_by_area_scld', 'solar_system_count_scld',\n 'solar_system_count', 'solar_panel_area_per_capita',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n 'solar_system_count_residential', 'Adoption']\ndropsPVres = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 
'locale', 'cust_cnt', 'cust_cnt_scld', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax', 'solar_panel_area_per_capita_scld', 'solar_system_count_residential_scld',\n 'solar_panel_area_divided_by_area_scld', 'solar_system_count_scld', 'Adoption',\n 'solar_system_count', 'solar_panel_area_divided_by_area', 'solar_panel_area_per_capita',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n ]\n\ndropsPVcap = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt', 'cust_cnt_scld', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax', 'solar_panel_area_per_capita_scld', 'solar_system_count_residential_scld',\n 'solar_panel_area_divided_by_area_scld', 'solar_system_count_scld', 'Adoption'\n 'solar_system_count', 'solar_panel_area_divided_by_area',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n 'solar_system_count_residential',]\n\nRFR_solar_system_count_residential = [\n 'solar_system_count_residential',\n 'incent_cnt_res_own',\n 'daily_solar_radiation',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs',\n 'dlrs_kwh',\n 'net_metering',\n 'net_metering_hu_own',\n 'land_area',\n 'avg_monthly_consumption_kwh',\n 'hu_2000toafter',\n 'poverty_family_count',\n 'heating_design_temperature',\n 'population_density_scld',\n ]\n\ndropsBo = ['locale_recode', 'state', 'fips', 'climate_zone', 'company_na', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt', 'number_of_solar_system_per_household',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax',\n 'solar_system_count', 'solar_panel_area_divided_by_area', 'solar_panel_area_per_capita',\n #'daily_solar_radiation', 'solar_system_count_residential',]\n ]\n\n\n\n# allows number of solar stuff to be left in for a heat map\ndrops_Minus_Solar = ['locale_recode', 'climate_zone', 'company_ty', 'eia_id',\n #'geoid', 'locale', 'number_of_solar_system_per_household_scld',\n 'geoid', 'locale', 'cust_cnt',\n # 'FIPS', 'property_tax', 'number_of_solar_system_per_household']\n 'FIPS', 'property_tax']\nmodel_Dec28 = ['Adoption','own_popden_scld', 'cdd_std_scld', 'Green_Travelers', 'total_area_scld', 'masters_or_above_rate',\n '%hh_size_3', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld', '%female', 'hu_1959toearlier_scld', 'locale_dummy',\n 'travel_time_49_89_rate', 'diversity', 'age_25_34_rate', 'employ_rate_scld', 'Pro_Occup', 'net_metering_hu_own',\n 'average_household_income_scld', 'avg_monthly_consumption_kwh_scld', 'avg_monthly_consumption_kwh_scld',\n 'dlrs_kwh', 'avg_monthly_bill_dlrs_scld', 'Ren', 'age_45_54_rate', 'incentive_residential_state_level',\n 'number_of_years_of_education_scld', 'net_metering_hu_own', 'education_bachelor_scld', 'incent_cnt_res_own']\n\nmodel_31 = ['Adoption', 'population_density', 'travel_time_49_89_rate', 'age_10_14_rate', 'age_15_17_rate',\n 'occupation_transportation_rate', 'occupation_arts_rate', 'occupation_finance_rate', 'education_master',\n 'occupation_construction_rate', 'age_25_34_rate', 'age_45_54_rate', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n '%hh_size_3', 'diversity', '%hh_size_2', '%hh_size_2', 'Green_Travelers', '%hh_size_1', 'incent_cnt_res_own',\n 'Pro_Occup', 
'mortgage_with_rate', 'hu_2000toafter_pct', 'cdd_std_scld', '%female', '%male', 'masters_or_above_rate',\n 'high_school_or_below_rate', 'net_metering_hu_own', 'total_area', 'dlrs_kwh', 'avg_inc_ebill_dlrs_scld',\n 'average_household_income_scld', 'Ren', 'locale_dummy', ]\n\npolicy_mixed = ['net_metering', 'property_tax', 'incent_cnt_res_own', 'incentive_count_residential',\n '','','','',\n '', '', '', '']\n\nmodel_slim = ['Adoption', 'population_density', 'high_school_or_below_rate', 'travel_time_10_19_rate',\n 'travel_time_49_89_rate', 'age_65_74_rate', 'age_18_24_rate', 'travel_time_10_19_rate', 'net_metering_hu_own',\n 'diversity', 'Green_Travelers', 'hu_1960to1979_pct', 'education_bachelor_scld']\n\nXu_Modelb = ['Adoption', 'incentive_count_residential_scld', 'incentive_residential_state_level_scld', 'net_meter_cate',\n 'dlrs_kwh', 'number_of_years_of_education_scld', 'education_less_than_high_school_rate', 'education_master_rate',\n 'median_household_income', 'employ_rate', 'female_pct', 'voting_2012_dem_percentage',\n 'hu_own_pct', 'diversity', 'age_35_44_rate', 'age_45_54_rate',\n 'age_55_64_rate', 'age_65_74_rate', 'population_density_scld', 'housing_unit_count',\n '%hh_size_3', 'land_area', 'locale_recode', 'hdd',\n 'heating_fuel_electricity_rate', 'heating_fuel_coal_coke_rate', 'hu_vintage_2010toafter', 'hu_vintage_1939toearlier',\n 'hu_vintage_1940to1959', 'hu_vintage_1960to1970', 'hu_vintage_1980to1999', 'Green_Travelers',\n 'avg_monthly_consumption_kwh', 'travel_time_40_59_rate', 'travel_time_60_89_rate']\n\n\nXu_ModelC = [\n 'incentive_count_residential', 'net_meter_bin', 'Ren',\n 'dlrs_kwh',\n 'property_tax_bin',\n 'number_of_years_of_education',\n 'education_HSorBELOW_rate', # TODO: fix this one\n 'education_master_or_above_rate',\n 'Zmedian_household_income',\n 'employ_rate',\n 'female_pct',\n 'voting_2012_dem_percentage',\n 'hu_own_pct',\n 'diversity',\n 'age_55_or_more_rate',\n 'population_density_scld',\n 'housing_unit_count_scld',\n '%hh_size_4',\n 'land_area_scld',\n 'locale_recodeRural', # TODO: need to create seperate variables for these 3\n 'locale_recodeSuburban',\n 'locale_recodeTown',\n 'hdd_scld',\n 'heating_fuel_electricity_rate',\n 'heating_fuel_coal_coke_rate',\n 'hu_1959toearlier_pct',\n 'hu_2000toafter_pct',\n 'Green_Travelers',\n 'avg_monthly_bill_dlrs' \n 'travel_time_40_89_rate',\n 'net_meter_binary1:hu_own_pct',\n 'property_tax_binary1:hu_own_pct',\n ]\n\n\n\nmodel_1_20 = ['Adoption', 'population_density_scld', 'education_bachelor_scld', 'travel_time_49_89_rate',\n 'occupation_transportation_rate', 'education_doctoral_scld', '%female', 'Green_Travelers',\n 'pop25_some_college_plus_scld', 'education_master_scld', '%hh_size_2', 'travel_time_10_19_rate',\n 'age_10_14_rate', 'age_more_than_85_rate', 'land_area_scld', 'travel_time_less_than_10_rate',\n 'occupation_finance_rate', 'occupation_construction_rate', 'diversity', 'cdd_std_scld',\n 'own_popden_scld', 'total_area_scld', 'age_18_24_rate', 'occupation_administrative_rate',\n 'heating_fuel_coal_coke_rate', 'incent_cnt_res_own', 'very_low_sf_own_mwh_scld', 'education_high_school_graduate_rate',\n 'hu__1980to1999_pct', 'Pro_Occup', 'travel_time_30_39_rate', 'age_65_74_rate',\n 'education_college_rate', '%hh_size_3', 'poverty_family_below_poverty_level_rate', 'hu_monthly_owner_costs_lessthan_1000dlrs_scld',\n 'hu_2000toafter_pct', '%hh_size_4', 'average_household_size_scld', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'household_type_family_rate', 'net_metering_hu_own', 
'number_of_years_of_education_scld', 'avg_monthly_consumption_kwh_scld',\n 'avg_inc_ebill_dlrs_scld', 'voting_2012_gop_percentage', 'voting_2012_dem_percentage', 'Ren',\n 'dlrs_kwh', 'bachelor_or_above_rate', 'incentive_count_residential', 'locale_dummy']\n\nincome_stat = [ 'average_household_income',\n 'average_household_income_scld',\n 'fam_med_income',\n 'fam_med_income_scld',\n 'median_household_income',\n 'median_household_income_scld',\n ]\n\nedu_1_1 = ['Adoption', 'number_of_years_of_education',\n 'education_less_than_high_school_rate','masters_or_above_rate', 'bachelor_or_above_rate',\n 'high_school_or_below_rate',\n 'education_doctoral', 'education_doctoral_scld','education_doctoral_rate',\n 'education_master', 'education_master_scld', 'education_master_rate',\n 'education_bachelor', 'education_bachelor_scld','education_bachelor_rate',\n 'education_college', 'education_college_rate', 'education_college_scld',\n 'education_high_school_graduate', 'education_high_school_graduate_rate', 'education_high_school_graduate_scld',\n 'education_less_than_high_school','education_less_than_high_school_rate', 'education_less_than_high_school_scld',\n 'education_professional_school','education_professional_school_scld',\n 'education_professional_school_rate',\n ]\nedu_1_8 = ['Adoption',\n 'education_bachelor',\n 'education_bachelor_rate',\n 'education_bachelor_scld',\n 'education_college',\n 'education_college_rate',\n 'education_college_scld',\n 'education_doctoral',\n 'education_doctoral_rate',\n 'education_doctoral_scld',\n 'education_high_school_graduate',\n 'education_high_school_graduate_rate',\n 'education_high_school_graduate_scld',\n 'education_less_than_high_school',\n 'education_less_than_high_school_rate',\n 'education_less_than_high_school_scld',\n 'education_master',\n 'education_master_rate',\n 'education_master_scld',\n 'masters_or_above_rate',\n 'education_professional_school',\n 'education_professional_school_rate',\n 'education_professional_school_scld',\n 'bachelor_or_above_rate'\n ]\n\ngender_stat = [\n 'pop_female',\n 'pop_female_scld',\n 'pop_male',\n 'pop_male_scld',\n '%female',\n '%male',\n ]\n\nhousing_1_8 = [\n 'hu_vintage_1939toearlier',\n 'hu_vintage_1939toearlier_scld',\n 'hu_vintage_1940to1959',\n 'hu_vintage_1940to1959_scld',\n 'hu_vintage_1960to1970',\n 'hu_vintage_1960to1970_scld',\n 'hu_vintage_1980to1999',\n 'hu_vintage_1980to1999_scld',\n 'hu_vintage_2000to2009',\n 'hu_vintage_2000to2009_scld',\n 'hu_vintage_2010toafter',\n 'hu_vintage_2010toafter_scld',\n 'housing_unit_median_value',\n 'housing_unit_median_value_scld',\n 'hu_1959toearlier',\n 'hu_1959toearlier_scld',\n 'hu_1960to1979_pct',\n 'hu_2000toafter',\n 'hu_2000toafter_pct',\n 'hu_2000toafter_scld',\n 'hu__1980to1999_pct',\n 'hu_med_val',\n 'hu_med_val_scld',\n ]\n\nage_1_B = [\n 'age_10_14_rate',\n 'age_15_17_rate',\n 'age_18_24_rate',\n 'age_25_34_rate',\n 'age_35_44_rate',\n 'age_45_54_rate',\n 'age_55_64_rate',\n 'age_5_9_rate',\n 'age_65_74_rate',\n 'age_75_84_rate',\n 'age_median',\n 'age_median_scld',\n 'age_more_than_85_rate',\n 'E_AGE17',\n 'E_AGE17_scld',\n 'education_population',\n 'education_population_scld',\n 'pop_med_age',\n 'pop_med_age_scld',\n 'pop_over_65',\n 'pop_over_65_scld',\n 'pop_under_18',\n 'pop_under_18_scld'\n ]\n\nage_1_7 = [\n 'age_10_14_rate',\n 'age_15_17_rate',\n 'age_18_24_rate',\n 'age_25_34_rate',\n 'age_35_44_rate',\n 'age_45_54_rate',\n 'age_55_64_rate',\n 'age_5_9_rate',\n 'age_65_74_rate',\n 'age_75_84_rate',\n 'age_median',\n 'age_median_scld',\n 
'age_more_than_85_rate',\n 'E_AGE17',\n 'education_population_scld',\n 'pop_med_age',\n 'pop_med_age_scld',\n 'pop_over_65',\n 'pop_over_65_scld',\n 'pop_under_18',\n 'pop_under_18_scld'\n ]\n\n\nfam_stat = [\n '%hh_size_1', # houshold size\n '%hh_size_2',\n '%hh_size_3',\n '%hh_size_4',\n 'average_household_size',\n 'average_household_size_scld',\n 'hh_size_1',\n 'hh_size_1_scld',\n 'hh_size_2',\n 'hh_size_2_scld',\n 'hh_size_3',\n 'hh_size_3_scld',\n 'hh_size_4',\n 'hh_size_4_scld',\n 'hh_total',\n 'hh_total_scld',\n 'household_count',\n 'household_count_scld',\n 'household_type_family_rate',\n 'poverty_family_below_poverty_level',\n 'poverty_family_below_poverty_level_rate',\n 'poverty_family_below_poverty_level_scld',\n 'poverty_family_count',\n 'poverty_family_count_scld',\n 'hu_own',\n 'hu_own_pct',\n 'hu_own_scld',\n ]\n\n\ndemo_1_2 = ['Adoption', 'number_of_years_of_education', 'education_less_than_high_school_rate',\n 'education_bachelor_scld', 'education_less_than_high_school_rate', 'education_high_school_graduate_rate',\n 'education_bachelor', 'education_bachelor_rate', 'education_master_scld', 'education_master_rate',\n 'education_doctoral_rate', 'masters_or_above_rate', 'bachelor_or_above_rate', 'high_school_or_below_rate',\n 'education_population', 'age_55_64_rate', 'age_65_74_rate', 'age_75_84_rate', 'age_more_than_85_rate',\n 'age_25_34_rate', 'age_median', 'fam_med_income', 'median_household_income', 'average_household_income_scld',\n 'average_household_income', 'diversity', 'pop_female', '%female', '%male', 'Anti_Occup', 'Pro_Occup',\n 'employ_rate', 'voting_2012_dem_percentage', 'voting_2012_gop_percentage', 'hu_own', 'hu_own_pct',\n 'hh_size_1', 'hh_size_2', 'hh_size_3', 'hh_size_4', '%hh_size_1', '%hh_size_2', '%hh_size_4',\n 'education_population_scld', 'hh_total', 'employ_rate', '%hh_size_2',\n 'high_school_or_below_rate', 'average_household_size', 'average_household_size_scld']\ndemo_1_2 = list(set(demo_1_2))\n\ndemo_top_00 = pd.read_excel('__Data/__Mixed_models/demo/demoold/top_00_vars_demoAra.xlsx', usecols=['Variable'])['Variable'].values.tolist() + ['Adoption']\ndemo_top_00_nopop = pd.read_excel('__Data/__Mixed_models/demo/demoold/top_00_vars_demoa_nopop.xlsx', usecols=['Variable'])['Variable'].values.tolist() + ['Adoption']\n#full_model_1_9 = pd.read_excel('__Data/__Mixed_models/mixed/full_model_12_28.xlsx')\ncost = [\n 'Adoption',\n 'dlrs_kwh',\n 'avg_electricity_retail_rate',\n 'avg_electricity_retail_rate_scld',\n 'housing_unit_median_gross_rent',\n 'housing_unit_median_gross_rent_scld',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'hu_monthly_owner_costs_lessthan_1000dlrs',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld',\n ]\n\ncost = [\n 'Adoption',\n 'dlrs_kwh',\n 'avg_electricity_retail_rate',\n 'avg_electricity_retail_rate_scld',\n 'housing_unit_median_gross_rent',\n 'housing_unit_median_gross_rent_scld',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs',\n 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'hu_monthly_owner_costs_lessthan_1000dlrs',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld',\n ]\n\nfull_model_1_9 = pd.read_excel('__Data/__Mixed_models/mixed/full_model_12_28.xlsx', usecols=['Variable'])['Variable'].values.tolist() + ['Adoption']\n\n# Score: 0.7297246212388425, Good is now:\ntop_3 = ['population_density', 'E_DAYPOP_scld', 'number_of_years_of_education']\n\nheating = [\n 'Adoption',\n 'heating_fuel_coal_coke',\n 'heating_fuel_coal_coke_rate',\n 
'heating_fuel_coal_coke_scld',\n 'heating_fuel_electricity',\n 'heating_fuel_electricity_rate',\n 'heating_fuel_electricity_scld',\n 'heating_fuel_fuel_oil_kerosene',\n 'heating_fuel_fuel_oil_kerosene_rate',\n 'heating_fuel_fuel_oil_kerosene_scld',\n 'heating_fuel_gas',\n 'heating_fuel_gas_rate',\n 'heating_fuel_gas_scld',\n 'heating_fuel_housing_unit_count',\n 'heating_fuel_housing_unit_count_scld',\n 'heating_fuel_none',\n 'heating_fuel_none_rate',\n 'heating_fuel_none_scld',\n 'heating_fuel_other',\n 'heating_fuel_other_rate',\n 'heating_fuel_other_scld',\n 'heating_fuel_solar',\n 'heating_fuel_solar_rate',\n 'heating_fuel_solar_scld',\n ]\n\n\nforward_sel = ['population_density_scld', 'education_bachelor_scld', 'E_DAYPOP', 'pop_under_18',\n 'cdd_std_scld', 'diversity', 'own_popden_scld', 'cooling_design_temperature_scld',\n 'education_college_rate', 'property_tax_bin', 'cooling_design_temperature', 'cdd_std',\n 'incentive_count_residential']\n\npop_1_8 = [\n 'Adoption',\n 'pop_total',\n 'pop_total_scld',\n 'pop_under_18',\n 'pop_under_18_scld',\n 'hh_total',\n 'hh_total_scld',\n 'E_DAYPOP',\n 'E_DAYPOP_scld',\n 'population_density',\n 'population_density_scld',\n 'household_count',\n 'household_count_scld',\n 'housing_unit_count',\n 'housing_unit_count_scld',\n ]\n\npop_2_1_nonredn = [\n 'Adoption',\n 'pop_total',\n 'pop_under_18',\n 'hh_total',\n 'E_DAYPOP',\n 'population_density',\n 'household_count',\n 'housing_unit_count',\n ]\n\n\npop_1_2 = ['Adoption', 'E_DAYPOP', 'population_density', 'E_DAYPOP_scld', 'population_density_scld',\n 'pop_total', 'household_count', 'housing_unit_count',\n ]\n\nhabit_1_5 = ['Adoption', 'Green_Travelers', 'avg_monthly_bill_dlrs', 'avg_monthly_consumption_kwh',\n 'travel_time_40_59_rate', 'travel_time_10_19_rate', 'travel_time_20_29_rate',\n 'travel_time_60_89_rate', 'travel_time_49_89_rate', 'transportation_home_rate',\n 'travel_time_30_39_rate', 'travel_time_average', 'travel_time_less_than_10_rate',\n 'transportation_bicycle_rate', 'transportation_car_alone_rate', 'transportation_carpool_rate',\n 'transportation_motorcycle_rate', 'transportation_public_rate', 'transportation_walk_rate',\n ]\n\npolicy_1_2 = ['Adoption', '', '', '', '',\n '', '', '', '',\n '', '', '', '']\n\nphysical_1_2 = ['Adoption', 'heating_fuel_coal_coke_rate', 'heating_fuel_electricity_rate', 'hu_vintage_1939toearlier', 'hu_vintage_1940to1959',\n 'hu_vintage_1960to1970', 'hu_vintage_1980to1999', 'hu_vintage_2000to2009', 'hu_vintage_2010toafter',\n 'hu_1959toearlier', 'hu_2000toafter', 'household_count', 'hu_own_pct', 'hu_own',\n 'hu_monthly_owner_costs_lessthan_1000dlrs', 'hu_monthly_owner_costs_greaterthan_1000dlrs',\n 'hu_med_val', 'hu_med_val_scld', 'hu_mortgage', 'heating_fuel_fuel_oil_kerosene_rate',\n 'housing_unit_count_scld', 'housing_unit_count', 'hu_vintage_2000to2009_scld',\n 'hu_1959toearlier_scld', 'hu_monthly_owner_costs_greaterthan_1000dlrs_scld',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld']\nphysical_trunc = load_tree_trunc_features(dffile='__Data/__Mixed_models/physical/phyold/RF_FI_physical_trunc_2200 _20a.xlsx', limit=.03)\n\ngeo_1_5 = ['Adoption', 'land_area', 'locale_dummy', 'total_area', ]\ngeo_1_8 = [\n 'Adoption',\n 'land_area',\n 'land_area_scld',\n 'locale_dummy',\n 'total_area',\n 'total_area_scld',\n ]\n\ne_cost = [\n 'dlrs_kwh',\n 'avg_electricity_retail_rate',\n 'avg_electricity_retail_rate_scld',\n ]\n\nclimate_1_2 = ['Adoption', 'cooling_design_temperature', 'cdd', 'heating_design_temperature', 'hdd',\n 'cdd_std_scld', 'cdd_std', 
'climate_zone', 'cdd_scld', 'hdd_scld',\n 'hdd_std', 'hdd_std_scld', 'heating_design_temperature_scld', 'cooling_design_temperature_scld',\n ]\n\noccu_1_5 = ['Adoption', 'occupation_administrative_rate', 'occupation_agriculture_rate', 'occupation_arts_rate',\n 'occupation_construction_rate', 'occupation_education_rate', 'occupation_finance_rate',\n 'occupation_information_rate', 'occupation_manufacturing_rate', 'occupation_public_rate',\n 'occupation_retail_rate', 'occupation_transportation_rate', 'occupation_wholesale_rate',\n ]\n\nownership = [\n 'Adoption',\n 'hu_mortgage',\n 'hu_mortgage_scld',\n 'mortgage_with_rate',\n 'hu_own',\n 'hu_own_pct',\n 'hu_own_scld',\n ]\n\n\nbest_guess = ['Adoption', 'population_density', 'avg_inc_ebill_dlrs', 'pop25_some_college_plus',\n 'hu_own', 'education_bachelor', 'masters_or_above_rate', 'diversity', 'incent_cnt_res_own',\n 'incentive_count_residential', 'hu_2000toafter',\n ]\n\nmixed_1_2 = ['Adoption', '', '', '', '',\n '', '', '', '',\n '', '', '', '']\nmixed_dif1 = pd.read_excel('__Data/__Mixed_models/some_dif_vars.xlsx')['Variable'].values.tolist()\n\nmix_1_12 = ['Adoption','population_density_scld', 'E_DAYPOP_scld', 'number_of_years_of_education_scld', 'total_area_scld',\n 'age_65_74_rate', 'poverty_family_below_poverty_level_rate', 'heating_fuel_solar_scld',\n 'travel_time_10_19_rate', 'occupation_finance_rate']\n\nRF_10v = ['E_DAYPOP_scld', 'number_of_years_of_education', 'population_density',\n 'hu_monthly_owner_costs_lessthan_1000dlrs_scld', 'avg_electricity_retail_rate_scld', 'travel_time_10_19_rate',\n 'hu_2000toafter_scld', 'heating_fuel_none', 'pop_over_65', 'incentive_count_nonresidential']\n\nmodel_empty = ['Adoption', '', '', '', '',\n '', '', '', '',\n '', '', '', '',\n ]\n\nmodel_dict_blocks = {'population': pop_1_2,\n 'demo':demo_1_2}\n\nacc_sets = [simple_set, simple_set2, simple_set3, simple_set4, simple_set5, simple_set6, simple_set7,\n simple_set8, simple_set9, simple_set9, simple_set10, simple_set11]\nRsqr_sets = [good_Rsqr, good_Rsqr2, good_Rsqr3, good_Rsqr4]\nLRacc_sets = [good_acc_lr, good_acc_lr2,good_acc_lr3, good_acc_lr4, good_acc_lr5,]\n\nRFaccuracy_votes = tally_var_votes(acc_sets, )\nRsqr_votes = tally_var_votes(Rsqr_sets,)\nLRacc_votes = tally_var_votes(LRacc_sets,)\n\nRF_voted = voted_list(RFaccuracy_votes, thresh=2)\nRsqr_voted = voted_list(Rsqr_votes, thresh=2)\nLRacc_voted = voted_list(LRacc_votes, thresh=2)\n\n\n# can be used to get to the variables in\n# a certain model.\nmodel_dir = {'incentives_M':incentives_M, # OK\n 'policy_N':policy_N, # OK\n 'policy_mixed':policy_mixed, #\n 'model_slim':model_slim, # OK\n 'model_Dec28': model_Dec28, # OK\n 'model_31': model_31, # OK\n 'Xu_Modelb': Xu_Modelb, #\n 'model_1_20': model_1_20, #\n 'demo_1_2': demo_1_2, # OK\n 'demo_top_00':demo_top_00, # OK\n 'demo_top_00_nopop':demo_top_00_nopop, # OK\n 'pop_1_2': pop_1_2, # OK\n 'policy_1_2': policy_1_2, #\n 'habbit_1_5': habit_1_5, #\n 'physical_1_2': physical_1_2, # OK\n 'physical_trunc': physical_trunc, # OK\n 'geo_1_5': geo_1_5, # OK\n 'climate_1_2': climate_1_2, # OK\n #'climate_trunc':climate_trunc, # OK\n 'mixed_1_2': mixed_1_2,\n 'mixed_dif1':mixed_dif1,\n 'occu_1_5':occu_1_5, # OK\n #'occu_trunc':occu_trunc, # OK\n 'best_guess':best_guess,\n 'least_model': least_model,\n 'edu_1_1': edu_1_1,\n 'age_1_7':age_1_7,\n 'edu_1_8':edu_1_8,\n 'popdenonly':popdenonly,\n 'fam_stat':fam_stat,\n 'income_stat':income_stat,\n 'gender_stat': gender_stat,\n 'housing_1_8':housing_1_8,\n 'pop_1_8':pop_1_8,\n 'geo_1_8':geo_1_8,\n 
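# --- Illustrative aside (not part of the original module) -------------------
# `tally_var_votes` / `voted_list` used above are defined elsewhere in the
# project; the sketch below only illustrates the underlying idea (count how
# often a variable appears across several candidate feature sets and keep the
# ones that clear a threshold).  The function names here are hypothetical and
# are not the project's actual API.
from collections import Counter

def demo_tally_votes(feature_sets):
    """Count, per variable, how many candidate sets include it."""
    return Counter(v for fset in feature_sets for v in set(fset))

def demo_voted_list(votes, thresh=2):
    """Keep variables that were 'voted for' by at least `thresh` sets."""
    return [var for var, n in votes.items() if n >= thresh]

# Example: a variable survives only if two or more candidate sets agree on it.
_votes = demo_tally_votes([['a', 'b'], ['b', 'c'], ['b', 'a']])
_kept = demo_voted_list(_votes, thresh=2)   # keeps 'a' and 'b'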
'cost':cost,\n 'e_cost':e_cost,\n 'heating':heating,\n 'ownership':ownership,\n 'full_model_1_9':full_model_1_9,\n 'forward_sel': forward_sel,\n 'top_ten_predictors':top_ten_predictors,\n 'simple_set':simple_set,\n 'simple_set2':simple_set2,\n 'simple_set3':simple_set3,\n 'simple_set4':simple_set4,\n 'simple_set5':simple_set5,\n 'simple_set6':simple_set6,\n 'simple_set7':simple_set7,\n 'simple_set8':simple_set8,\n 'simple_set9':simple_set9,\n 'simple_set10':simple_set10,\n 'good_acc_lr':good_acc_lr,\n 'good_acc_lr2':good_acc_lr2,\n 'good_acc_lr3':good_acc_lr3,\n 'good_acc_lr4':good_acc_lr4,\n 'good_acc_lr5':good_acc_lr5,\n 'good_Rsqr':good_Rsqr,\n 'good_Rsqr2':good_Rsqr2,\n 'good_Rsqr3':good_Rsqr3,\n 'good_Rsqr4':good_Rsqr4,\n 'RF_10v':RF_10v,\n 'simple_set11':simple_set11,\n 'simple_set12': simple_set12,\n 'simple_set13': simple_set13,\n 'good_acc_lr7':good_acc_lr7,\n 'LR_ac':LR_ac,\n 'Rsqr_LRs':Rsqr_LRs,\n 'RF_voted':RF_voted,\n 'Rsqr_voted':Rsqr_voted,\n 'LRacc_voted':LRacc_voted,\n } #\n\nblock_directories = {'climate':['__Data/__Mixed_models/climate/'],\n 'demo':['__Data/__Mixed_models/demo/'],\n 'geo':['__Data/__Mixed_models/geo/'],\n 'habit':['__Data/__Mixed_models/habbit/'],\n 'mixed':['__Data/__Mixed_models/mixed/'],\n 'occu':['__Data/__Mixed_models/occu/'],\n 'policy':['__Data/__Mixed_models/policy/'],\n 'physical':['__Data/__Mixed_models/physical/'],\n 'population':['__Data/__Mixed_models/population/'],\n 'education':['__Data/__Mixed_models/education/']\n }\n\n# names of feature lists to choose\n# use the index to load from file strings\npossible_features = ['incentives_M', # OK 0\n 'policy_N', # OK 1\n 'model_slim', # OK 2\n 'model_Dec28', # OK 3\n 'model_31', # OK 4\n 'Xu_Modelb', # OK 5\n 'demo_1_2', # OK 6\n 'climate_1_2', # OK 7\n 'pop_1_2', # OK 8\n 'geo_1_5', # OK 9\n 'habbit_1_5', # OK 10\n 'physical_1_2', # OK 11\n 'occu_1_5', # OK 12\n 'physical_1_2', # OK 13\n 'mixed', # OK 14\n 'demo_top_00', # OK 15\n 'demo_top_00_nopop', # OK 16\n 'occu_trunc', # OK 17\n 'physical_trunc', # OK 18\n 'mixed_dif1', # OK 19\n 'best_guess', # OK 20\n 'climate_trunc', # OK 21\n 'least_model', # 22\n 'edu_1_1', # 23\n 'age_1_7', # 24\n 'edu_1_8', # 25\n 'popdenonly', # 26\n 'fam_stat', # 27\n 'gender_stat', # 28\n 'income_stat', # 29\n 'pop_1_8', # 30\n 'geo_1_8', # 31\n 'housing_1_8', # 32\n 'cost', # 33\n 'e_cost', # 34\n 'heating', # 35\n 'ownership', # 36\n 'full_model_1_9', # 37\n 'forward_sel', # 38\n 'simple_set', # 39\n 'simple_set2', # 40\n 'simple_set3', # 41\n 'simple_set4', # 42\n 'simple_set5', # 43\n 'simple_set6', # 44\n 'simple_set7', # 45\n 'good_acc_lr', # 46\n 'good_acc_lr2', # 47\n 'good_acc_lr3', # 48\n 'good_acc_lr4', # 49\n 'good_acc_lr5', # 50\n 'simple_set8', # 51\n 'simple_set9', # 52\n 'simple_set10', # 53\n 'good_Rsqr', # 54\n 'good_Rsqr2', # 55\n 'good_Rsqr3', # 56\n 'good_Rsqr4', # 57\n 'top_ten_predictors', # 58\n 'RF_10v', # 59\n 'simple_set12', # 60\n 'simple_set11', # 61\n 'good_acc_lr7', # 62\n 'LR_ac', # 63\n 'Rsqr_LRs', # 64\n 'RF_voted', # 65\n 'Rsqr_voted', # 66\n 'LRacc_voted', # 67\n 'simple_set13', # 68\n ]\n\nfile_strings = {possible_features[0]:'incentives',\n # possible_features[]:'',\n possible_features[1]:'policy',\n possible_features[2]:'model_slim',\n possible_features[3]:'model_Dec28',\n possible_features[4]:'model_31',\n possible_features[5]:'Xu_Models', # TODO: need to add special case for this one\n possible_features[7]:'climate',\n possible_features[6]:'demo',\n possible_features[9]:'geo',\n possible_features[10]:'habbit',\n 
possible_features[11]:'incentives',\n possible_features[14]:'mixed',\n possible_features[12]:'occu',\n possible_features[13]:'physical',\n possible_features[8]:'population',\n possible_features[15]:'demo',\n possible_features[16]:'demo',\n possible_features[17]:'occu',\n possible_features[18]:'physical',\n possible_features[19]:'mixed',\n possible_features[20]:'mixed',\n possible_features[21]:'climate',\n possible_features[22]:'mixed',\n possible_features[23]:'education',\n possible_features[24]: 'age',\n possible_features[25]: 'education',\n possible_features[26]: 'mixed',\n possible_features[27]: 'family',\n possible_features[28]: 'gender',\n possible_features[29]: 'income',\n possible_features[30]: 'population',\n possible_features[31]: 'geo',\n possible_features[32]: 'housing',\n possible_features[33]: 'cost',\n possible_features[34]: 'ecost',\n possible_features[35]: 'heating',\n possible_features[36]: 'ownership',\n possible_features[37]: 'mixed',\n possible_features[38]: 'forward_sel',\n possible_features[39]: 'forward_sel',\n possible_features[40]: 'forward_sel',\n possible_features[41]: 'forward_sel',\n possible_features[42]: 'forward_sel',\n possible_features[43]: 'forward_sel',\n possible_features[44]: 'forward_sel',\n possible_features[45]: 'forward_sel',\n possible_features[46]: 'forward_sel',\n possible_features[47]: 'forward_sel',\n possible_features[48]: 'forward_sel',\n possible_features[49]: 'forward_sel',\n possible_features[50]: 'forward_sel',\n possible_features[51]: 'forward_sel',\n possible_features[52]: 'forward_sel',\n possible_features[53]: 'forward_sel',\n possible_features[54]: 'forward_sel',\n possible_features[55]: 'forward_sel',\n possible_features[56]: 'forward_sel',\n possible_features[57]: 'forward_sel',\n possible_features[58]: 'forward_sel',\n possible_features[59]: 'forward_sel',\n possible_features[60]: 'forward_sel',\n possible_features[61]: 'forward_sel',\n possible_features[62]: 'forward_sel',\n possible_features[63]: 'forward_sel',\n possible_features[64]: 'forward_sel',\n possible_features[65]: 'forward_sel',\n possible_features[66]: 'forward_sel',\n possible_features[67]: 'forward_sel',\n }\n\n\n\"\"\" These are the features you want in the census tract shape files for the heat maps \"\"\"\n\"\"\" need to have \"\"\"\nheat_map = ['fips', 'number_of_solar_system_per_household', 'incentive_count_residential',\n 'Ren', 'avg_electricity_retail_rate_scld', 'dlrs_kwh', 'net_metering', 'Adoption',\n 'incentive_count_nonresidential', 'population_density', 'education_bachelor_scld',\n 'incentive_residential_state_level', 'solar_system_count', 'solar_panel_area_divided_by_area',\n 'solar_panel_area_per_capita', 'daily_solar_radiation', 'solar_system_count_residential',\n ]\n\n\ndef model_selector(model_number):\n \"\"\" This will return a list of the variables for\n specific model, the direcotroy name to same it to\n and the actual data columns called use cols\n The model number corresponds to the index into possible features model dictionary\n \"\"\"\n model_vars = possible_features[model_number] # grabs featurs and adoption\n dir_name = file_strings[model_vars] # what diretory to save the results in based on its grouping\n usecols = model_dir[model_vars] # the independent variables of the model\n return model_vars, dir_name, usecols # all model variables, directory , predictors\n\n\ndef model_loader(target='Adoption', usecols=None, model_file=model_dec_30, new_drops=None, ret_full=True,\n verbose=False, impute=True, shuffle=False, heatmap=True, 
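# --- Illustrative aside (not part of the original module) -------------------
# Usage sketch for `model_selector` above.  Per the `possible_features`,
# `file_strings`, and `model_dir` tables, index 8 names the 'pop_1_2' feature
# list, its results directory is 'population', and the returned usecols are the
# population predictors (including 'Adoption').
if __name__ == '__main__':
    _name, _dir, _cols = model_selector(8)
    print(_name)        # 'pop_1_2'
    print(_dir)         # 'population'
    print(len(_cols))   # number of columns in the pop_1_2 list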
drops2=None):\n # will load data frames for a training set and possibly a heat map\n import numpy as np\n df_base = None\n if new_drops is not None:\n drops = new_drops\n if drops2 is None:\n drops2 = drops_Minus_Solar\n if usecols is not None:\n df_base = pd.read_excel(model_file, usecols=list(set(usecols + [target]+ heat_map))) # load data set with target\n hm_usecols = list(set(usecols + [target] + heat_map)) # load the data for the heat map\n df_HM = pd.read_excel(model_file,usecols=hm_usecols)\n else:\n # df_base = pd.read_excel(model_dec_30_scld).drop(columns=['Anti_Occup_scld', 'E_DAYPOP_scld', 'E_MINRTY_scld',\n # 'cust_cnt_scld', 'employ_rate_scld', ])\n df_base = pd.read_excel(model_file).drop(columns=drops)\n df_HM = pd.read_excel(model_file).drop(columns=drops2)\n\n # remove target from variable list for training\n dfattribs = list(df_base.columns.values.tolist()).copy()\n ogdfattribs = list(df_HM.columns.values.tolist()).copy()\n # show_list(list(df_base.columns.values.tolist()))\n del dfattribs[dfattribs.index(target)]\n del ogdfattribs[ogdfattribs.index(target)]\n\n # clean up the data by removing missing data\n df0 = df_base.loc[:, [target]+ dfattribs]\n dfOG = df_HM.loc[:, [target]+ ogdfattribs]\n if shuffle:\n from _products.utility_fnc import shuffle_deck\n shuffle_deck(df0)\n shuffle_deck(dfOG)\n if impute:\n df0.replace(np.inf, np.nan, inplace=True)\n df0.replace(-999, np.nan, inplace=True)\n df0.replace('', np.nan, inplace=True)\n df0 = df0.dropna()\n dfOG.replace(-999, np.nan, inplace=True)\n dfOG.replace('', np.nan, inplace=True)\n dfOG = dfOG.dropna()\n df0.drop(columns=heat_map)\n if verbose:\n print(\"The features in the model\")\n print(dfattribs)\n print('Model Statistics:')\n print(df0.describe())\n # df0 training data frame, dfog used to make heat map\n return (df0, dfOG), dfattribs, (df0.loc[:, dfattribs], df0[target])\n\n\ndef load_model(target='', usecols=None, model_file=model_feb_20, ret_full=True,\n verbose=False, impute=True, shuffle=False, heatmap=True, pdrops=None):\n # will load data frames for a training set and possibly a heat map\n import numpy as np\n df_base = None\n if pdrops is not None:\n udrops = pdrops\n if usecols is not None:\n #if pdrops is None:\n df_base = pd.read_excel(model_file, usecols=usecols + [target]).drop(columns=udrops) # load data set with target\n else:\n df_base = pd.read_excel(model_file,).drop(columns=udrops) # load data set with target\n else:\n # df_base = pd.read_excel(model_dec_30_scld).drop(columns=['Anti_Occup_scld', 'E_DAYPOP_scld', 'E_MINRTY_scld',\n # 'cust_cnt_scld', 'employ_rate_scld', ])\n # df_base = pd.read_excel(model_file).drop(columns=drops)\n if usecols is None:\n df_base = pd.read_excel(model_file) # load data set with target\n else:\n df_base = pd.read_excel(model_file, usecols=usecols + [target])\n #df_base = pd.read_excel(model_file,).drop(columns=pdrops)\n\n # remove target from variable list for training\n dfattribs = list(df_base.columns.values.tolist()).copy()\n # show_list(list(df_base.columns.values.tolist()))\n del dfattribs[dfattribs.index(target)]\n\n # clean up the data by removing missing data\n df0 = df_base.loc[:, [target]+ dfattribs]\n if shuffle:\n from _products.utility_fnc import shuffle_deck\n shuffle_deck(df0)\n if impute:\n df0.replace(-999, np.nan, inplace=True)\n df0.replace('', np.nan, inplace=True)\n df0 = df0.dropna()\n\n if verbose:\n print(\"The features in the model\")\n print(dfattribs)\n print('Model Statistics:')\n print(df0.describe())\n\n return df0, dfattribs, 
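# --- Illustrative aside (not part of the original module) -------------------
# `model_loader` / `load_model` above treat -999, '' and inf as missing codes
# before dropping incomplete rows.  The toy frame below (hypothetical values,
# column names borrowed from the model lists) shows the same
# replace-then-dropna pattern in isolation.
import numpy as np
import pandas as pd

_toy = pd.DataFrame({'Adoption': [1, 0, 1],
                     'population_density': [120.0, -999, np.inf],
                     'dlrs_kwh': [0.11, 0.09, '']})
_toy = _toy.replace([np.inf, -999, ''], np.nan).dropna()
# Only the first row survives; sentinel codes never reach the estimators.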
(df0.loc[:, dfattribs], df0[target])\n\n\n\nclass DeepSolarModel:\n def __init__(self, target, model_num=None, model_name=None, model_file=model_dec_30, variables=None,\n verbose=False, ts=.5):\n self.target, self.model_num, self.model_name, self.model_file, self.variables, self.verbose\\\n = target, model_num, model_name, model_file, variables, verbose\n self.model_vars, self.dir_name, self.usecols = model_selector(self.model_num)\n if self.verbose:\n print('Loading model {}'.format(self.model_vars))\n print('Using variables: {}'.format(self.usecols))\n\n # load the desired model, dropping any needed features\n df0s, Mfeatures, xy = model_loader(target, self.usecols, self.model_file)\n self.df0 = df0s[0] # original un altered data set\n self.dfog = df0s[1] # heat_map_version\n X0 = xy[0].values # predictors variables\n self.Xdf0 = xy[0] # heat map x\n self.y0 = xy[1].values # targets\n self.ydf0 = xy[1] # heat map targets\n\n Training, Testing = cross_val_splitter(self.df0, Mfeatures, ts=.5, verbose=True)\n\n self.X_tr, self.y_tr = Training[0], Training[1]\n self.X_ts, self.y_ts = Testing[0], Testing[1]\n\n\n\"\"\" Get the usecols or variables we want to use from the data sets\"\"\"\ndef get_DS_NREL_SVI_usecols(paths, ):\n ds = pd.read_excel(paths[0][0], sheet_name=paths[0][1])['variables'].values.tolist()\n nrel = pd.read_excel(paths[1][0], sheet_name= paths[1][1])['variables'].values.tolist()\n svi = pd.read_excel(paths[2][0], sheet_name= paths[2][1])['variables'].values.tolist()\n return ds, nrel, svi\n\n\"\"\" This will select certain states/census tracts ects from the deep solar set\"\"\"\ndef DeepSolarAreaSelector(ds_file, region_type, regions, ds_cols=None):\n if ds_cols is not None:\n ds = pd.read_excel(ds_file, usecols=ds_cols)\n else:\n ds = pd.read_excel(ds_file,)\n ds = ds.loc[ds[region_type].isin(regions)]\n return ds\n\n\"\"\" this will load the data sets for merging \"\"\"\ndef load_the_data(ds_file, region_type, regions, ds_cols,\n nrel_file, nrel_cols,\n svi_file, svi_cols, verbose=False):\n ds = DeepSolarAreaSelector(ds_file, region_type, regions, ds_cols)\n nrel = pd.read_excel(nrel_file, usecols=nrel_cols)\n svi = pd.read_excel(svi_file, usecols=svi_cols)\n return ds, nrel, svi\n\ndef merge_set_for_processing(set_lists, target, verbose=False):\n from _products.utility_fnc import data_merger\n # dsss = [ds, svi, nrel] == set_lists\n merged = data_merger(set_lists, target=target)\n if verbose:\n print('=======================================')\n print('The merged data set contains: ')\n print(merged.columns.values.tolist())\n print('=======================================')\n return merged\n\ndef cross_val_splitter(df0, rl, target='Adoption', ts=.5, verbose=False, stratify=True):\n from sklearn.model_selection import train_test_split\n targets0 = df0[target]\n df0 = df0.loc[:, rl]\n ts = .50\n tr = 1 - ts\n # Create training and testing sets for the data\n if stratify:\n X_train0, X_test0, y_train0, y_test0 = train_test_split(df0, targets0, stratify=targets0, test_size=ts,\n train_size=tr)\n else:\n X_train0, X_test0, y_train0, y_test0 = train_test_split(df0, targets0, test_size=ts,\n train_size=tr)\n if verbose:\n print('Training:')\n print(X_train0.describe())\n print('Testing:')\n print(X_test0.describe())\n return (X_train0, y_train0), (X_test0, y_test0)\n\n\n\ndef generate_green_travelers(merged, green_travelers=None):\n \"\"\" adds a summatin of those whom walk, ride a bike or work from home\"\"\"\n from _products.utility_fnc import create_combo_var_sum\n # pass a 
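# --- Illustrative aside (not part of the original module) -------------------
# `cross_val_splitter` above stratifies on the target so both halves keep the
# same adopter / non-adopter ratio.  A minimal self-contained sketch of that
# idea on synthetic data:
import pandas as pd
from sklearn.model_selection import train_test_split

_demo = pd.DataFrame({'x1': range(10), 'Adoption': [0, 1] * 5})
_tr, _ts = train_test_split(_demo, test_size=0.5, stratify=_demo['Adoption'])
# Each half now contains the same share of Adoption == 1 rows as the full frame.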
list of desired variables to sum, and the data frame the come from and get back the result\n if green_travelers is None:\n green_travelers = ['transportation_home_rate', 'transportation_bicycle_rate', 'transportation_walk_rate']\n create_combo_var_sum(merged, green_travelers, newvar='Green_Travelers')\n return\n\ndef generate_pro_Anti_occu(merged):\n \"\"\" Adds column for a summation of jobs negatively and postively correlated to adoption\"\"\"\n from _products.utility_fnc import create_combo_var_sum\n anti_jobs = ['occupation_agriculture_rate', 'occupation_construction_rate', 'occupation_transportation_rate',\n 'occupation_manufacturing_rate']\n pro_jobs = ['occupation_administrative_rate', 'occupation_information_rate', 'occupation_finance_rate',\n 'occupation_arts_rate', 'occupation_education_rate']\n create_combo_var_sum(merged, anti_jobs, newvar='Anti_Occup')\n create_combo_var_sum(merged, pro_jobs, newvar='Pro_Occup')\n return\n\ndef add_state_Ren(merged, ):\n from _products.utility_fnc import add_renewable_gen\n # ren = {\"al\":.091, 'ga':.076, 'ky':.062, 'ms':.029, 'nc':.128, 'sc':.053, 'tn':.133, 'va':.059, 'fl':.033}\n \"\"\" source for info below: https://www.energy.gov/maps/renewable-energy-production-state \"\"\"\n\n # ren = {\"al\":.1615, 'az':.1201, 'ca':0.2436, 'ga':.3636, 'ma':0.4272, 'ny':0.4479, 'tx':0.255, 'ut':0.151,\n # 'ky':.235, 'ms':.117, 'nc':.2573, 'sc':.1604, 'tn':.3521, 'va':.1054, 'fl':.4099}\n ren = {\"al\": .1615, 'az': .1201, 'ca': 0.2436, 'ga': .3636, 'ma': 0.4272, 'ny': 0.4479, 'tx': 0.255, 'ut': 0.151,\n 'ky': .235, 'ms': .117, 'nc': .2573, 'tn': .3521, 'va': .1054, }\n merged = add_renewable_gen(merged, 'state', ren)\n return\n\ndef locale_recode_action(merged, ):\n from _products.utility_fnc import recode_var_sub\n local_recode = {'Rural': 1, 'Town': 2, 'City': 4, 'Suburban': 3, 'Urban': 4}\n # below will replace any string containing the key with the vals\n local_recodeA = {'Rural': 'Rural', 'Town': 'Town', 'City': 'City', 'Suburban': 'Suburban', 'Urban': 'City'}\n sought = ['Rural', 'Town', 'City', 'Suburban', 'Urban']\n local = list(merged['locale'])\n merged['locale_dummy'] = recode_var_sub(sought, local, local_recode)\n merged['locale_recode'] = recode_var_sub(sought, local, local_recodeA)\n return\n\n\ndef gen_edu_combo(merged):\n from _products.utility_fnc import create_combo_var_sum\n high_below = ['education_less_than_high_school_rate', 'education_high_school_graduate_rate']\n merged['high_school_or_below_rate'] = create_combo_var_sum(merged, high_below)\n\n master_above = ['education_master_rate', 'education_doctoral_rate']\n merged['masters_or_above_rate'] = create_combo_var_sum(merged, master_above)\n\n bachelor_above = ['education_master_rate', 'education_doctoral_rate'] + ['education_bachelor_rate']\n merged['bachelor_or_above_rate'] = create_combo_var_sum(merged, bachelor_above)\n\n edu_excludes = ['high_school_or_below_rate', 'masters_or_above_rate',\n 'bachelor_or_above_rate'] + high_below + master_above + bachelor_above\n\n return edu_excludes\n\n\ndef gen_ownership_pct(merged):\n merged['hu_own_pct'] = (merged['hu_own'] / merged['housing_unit_count']).values.tolist()\n home_excludes = ['hu_own_pct']\n return home_excludes\n\ndef net_met_ptx_bin_recode(merged):\n from _products.utility_fnc import thresh_binary_recode\n thresh_binary_recode(merged, 'net_metering', )\n thresh_binary_recode(merged, 'property_tax', )\n policy_excludes = ['net_metering', 'property_tax']\n return policy_excludes\n\ndef gen_age_range(merged):\n from 
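# --- Illustrative aside (not part of the original module) -------------------
# `net_met_ptx_bin_recode` above calls `thresh_binary_recode` from the utility
# module.  A plausible reading (assumption, the exact rule lives in that module)
# is a 0/1 flag for whether any net-metering or property-tax incentive is
# present; a stand-alone pandas sketch of that recode, with made-up values:
import pandas as pd

_pol = pd.DataFrame({'net_metering': [0, 2, 5], 'property_tax': [0, 0, 1]})
_pol['net_metering_bin'] = (_pol['net_metering'] > 0).astype(int)
_pol['property_tax_bin'] = (_pol['property_tax'] > 0).astype(int)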
_products.utility_fnc import create_combo_var_sum\n # make range from 1959 to earlier variable\n hage1959toearlier = ['hu_vintage_1940to1959', 'hu_vintage_1939toearlier']\n merged['hu_1959toearlier'] = create_combo_var_sum(merged, hage1959toearlier)\n\n # make 60 to 79 pct variable\n merged['hu_1960to1979_pct'] = (merged['hu_vintage_1960to1970'] / merged['housing_unit_count']).values.tolist()\n\n # make 80 to 99 pct variable\n merged['hu_1980to1999_pct'] = (merged['hu_vintage_1980to1999'] / merged['housing_unit_count']).values.tolist()\n\n # make list of variabels to sum to get range variable from 2000 to beyond\n hage2000tobeyond = ['hu_vintage_2000to2009', 'hu_vintage_2010toafter']\n merged['hu_2000toafter'] = create_combo_var_sum(merged, hage2000tobeyond)\n\n # make percentage variable out of new variable\n merged['hu_2000toafter_pct'] = (merged['hu_2000toafter'] / merged['housing_unit_count']).values.tolist()\n hu_excludes = ['hu_1980to1999_pct', 'hu_2000toafter', 'hu_1960to1979_pct']\n return hu_excludes\n\n\ndef gen_hh_size(merged):\n from _products.utility_fnc import create_combo_var_sum, percentage_generator\n hh_sizes = ['hh_size_1', 'hh_size_2', 'hh_size_3', 'hh_size_4']\n merged['hh_total'] = create_combo_var_sum(merged, hh_sizes)\n merged['%hh_size_1'] = percentage_generator(merged, hh_sizes[0], 'hh_total')\n merged['%hh_size_2'] = percentage_generator(merged, hh_sizes[1], 'hh_total')\n merged['%hh_size_3'] = percentage_generator(merged, hh_sizes[2], 'hh_total')\n merged['%hh_size_4'] = percentage_generator(merged, hh_sizes[3], 'hh_total')\n\n\ndef gender_redodeing(merged):\n from _products.utility_fnc import create_combo_var_sum, percentage_generator\n female_count = 'pop_female'\n male_count = 'pop_male'\n total = 'pop_total'\n create_combo_var_sum(merged, [female_count, male_count], newvar=total)\n percentage_generator(merged, female_count, total, newvar='%female')\n percentage_generator(merged, male_count, total, newvar='%male')\n return\n\ndef gen_travel_time_mix(merged):\n from _products.utility_fnc import create_combo_var_sum, percentage_generator\n trav_recodes = ['travel_time_40_59_rate', 'travel_time_60_89_rate']\n create_combo_var_sum(merged, trav_recodes, newvar='travel_time_49_89_rate')\n travel_excludes = ['travel_time_49_89_rate']\n return travel_excludes\n\ndef gen_age_ranges(merged):\n from _products.utility_fnc import create_combo_var_sum, percentage_generator\n age_25_44 = ['age_25_34_rate','age_35_44_rate']\n age_25_64 = ['age_25_34_rate','age_35_44_rate', 'age_45_54_rate', 'age_55_64_rate']\n a_25_44 = 'age_25_44_rate'\n a_25_64 = 'age_25_64_rate'\n a_55_more = 'age_55_or_more_rate'\n #merged[a_25_44] = create_combo_var_sum(merged, age_25_44, newvar=a_25_44)\n #merged[a_25_44] = create_combo_var_sum(merged, age_25_44, newvar=a_25_44)\n #merged[a_25_64] = create_combo_var_sum(merged, age_25_64, newvar=a_25_64)\n create_combo_var_sum(merged, age_25_64, newvar=a_55_more)\n create_combo_var_sum(merged, age_25_64, newvar=a_25_64)\n create_combo_var_sum(merged, age_25_64, newvar=a_55_more)\n return\n\ndef gen_mixed(merged):\n from _products.utility_fnc import generate_mixed\n net_own = ['net_metering_bin', 'hu_own_pct']\n new_net = 'net_metering_hu_own'\n generate_mixed(merged, net_own, new_net)\n\n incent_res_own = ['incentive_count_residential', 'hu_own_pct']\n new_incent_own = 'incent_cnt_res_own'\n generate_mixed(merged, incent_res_own, new_incent_own)\n\n # incent_med_income = ['incentive_residential_state_level', 'median_household_income' ]\n # 
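# --- Illustrative aside (not part of the original module) -------------------
# The helpers above build sum and share variables (e.g. '%hh_size_2',
# 'hu_2000toafter_pct') by totalling raw counts and dividing by that total.
# Equivalent stand-alone pandas sketch with made-up household counts:
import pandas as pd

_hh = pd.DataFrame({'hh_size_1': [10, 40], 'hh_size_2': [30, 40],
                    'hh_size_3': [40, 10], 'hh_size_4': [20, 10]})
_sizes = ['hh_size_1', 'hh_size_2', 'hh_size_3', 'hh_size_4']
_hh['hh_total'] = _hh[_sizes].sum(axis=1)
for _c in _sizes:
    _hh['%' + _c] = _hh[_c] / _hh['hh_total']
# The four share columns now sum to 1.0 in every row.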
incent_state_income = 'incent_st_Mincome'\n # generate_mixed(merged, incent_med_income, incent_state_income)\n\n # incent_avg_income = ['incentive_residential_state_level', 'average_household_income' ]\n # incent_state_Aincome = 'incent_st_Aincome'\n # generate_mixed(merged, incent_avg_income, incent_state_Aincome)\n\n med_income_ebill = ['avg_monthly_bill_dlrs', 'median_household_income']\n medincebill = 'med_inc_ebill_dlrs'\n generate_mixed(merged, med_income_ebill, medincebill)\n\n avg_income_ebill = ['avg_monthly_bill_dlrs', 'average_household_income']\n avgincebill = 'avg_inc_ebill_dlrs'\n generate_mixed(merged, avg_income_ebill, avgincebill)\n\n own_popden = ['population_density', 'hu_own_pct']\n ownpopden = 'own_popden'\n generate_mixed(merged, own_popden, ownpopden)\n mixed_excludes = [new_net, new_incent_own]\n return mixed_excludes\n\ndef make_report_files(merged, pearson_fx=None, pearson_fc=None):\n from _products.utility_fnc import today_is\n import numpy as np\n merged = pd.DataFrame(merged.values, dtype=np.float, columns=merged.columns.tolist(), index=merged.index.tolist(), )\n # correlation table TODO: need to set some to pearson instead of kendal's tau\n kencorr = merged.corr(method='kendall').sort_values(by=['Adoption'], ascending=False, inplace=False)\n pearsoncorr = merged.corr(method='pearson').sort_values(by=['Adoption'], ascending=False, inplace=False)\n\n pearsoncorr.loc[:, 'Adoption'] = kencorr.loc[:, 'Adoption']\n pearsoncorr.loc['Adoption', :] = kencorr.loc['Adoption', :]\n if pearson_fx is None:\n pearson_fx ='__Data/__Mixed_models/December/DeepSolar_Model_correlation_{}_pearson.xlsx'.format(today_is())\n pearson_fc = '__Data/__Mixed_models/December/__DeepSolar_Model_correlation_{}_pearson.csv'.format(today_is())\n pearsoncorr.sort_values(by=['Adoption'], ascending=False, inplace=False).to_excel(pearson_fx)\n merged.corr(method='kendall').sort_values(by=['Adoption'], ascending=False, inplace=False).to_excel('__Data/__Mixed_models/December/DeepSolar_Model_correlation_{}_kendal.xlsx'.format(today_is()))\n\n pearsoncorr.sort_values(by=['Adoption'], ascending=False, inplace=False).to_csv(pearson_fc)\n merged.corr(method='kendall').sort_values(by=['Adoption'], ascending=False, inplace=False).to_csv('__Data/__Mixed_models/December/__DeepSolar_Model_correlation_{}_kendal.csv'.format(today_is()))\n return\n\ndef scale_merged(merged, excludes, scale_sub, gen_scld_only, verbose=True):\n from sklearn.preprocessing import MinMaxScaler\n from _products.utility_fnc import rmv_list_list, today_is\n scaler = MinMaxScaler()\n # list of things to remove\n rmv_scl = list(\n set(pd.read_excel('__Data/__Mixed_models/__Nominal_values_exclude_list.xlsx')['variables'].values.tolist()))\n rmv_scl += excludes\n add_back = list(set(['Adoption'] + excludes ))\n rmv_scl = list(set(rmv_scl))\n # remove the string based or unwanted varibles from set to scale\n #\n ma_tribs = merged.columns.values.tolist()\n scalables = rmv_list_list(ma_tribs, rmv_scl)\n # nrm = NORML()\n scldf = merged.loc[:, scalables]\n if verbose:\n print('remove list', rmv_scl)\n print('merged attribs', ma_tribs)\n print('scalables stuff\\n', scldf.columns)\n print(scalables)\n\n if scale_sub: # Ot substitute the scaled versions\n # if want to substitute do below\n nscalables = [s + '_scld' for s in scalables]\n # nrm.fit(scldf)\n scldf = pd.DataFrame(scaler.fit_transform(scldf), columns=nscalables, index=merged.index.values.tolist())\n if verbose:\n print('merge shape 1', merged.shape)\n print(nscalables)\n print(scldf)\n 
if gen_scld_only:\n if verbose:\n print('Only generating a file for scaled variables ')\n scldf.to_excel('__Data/__DeepSolar/Feb/Mixed/DeepSolar_Model_{}_scld_ONLY.xlsx'.format(today_is()),\n index=False)\n quit(1450)\n merged = merged.join(scldf, lsuffix='', rsuffix='_scld')\n # merged.index = nscalables\n merged.drop(columns=scalables, inplace=True)\n merged = merged.loc[:, add_back + scldf.columns.tolist()]\n print('Scaled data shape: ', merged.shape)\n print(merged)\n # merged.to_excel('__Data/__Mixed_models/December/DS_1_12_scld.xlsx', index=False)\n merged.to_csv('__Data/__DeepSolar/Feb/Mixed/DS_2_2_20_scld.csv', index=False)\n quit(402)\n else:\n # if want to add do below TODO: adding makes it to big create two and do seperate join\n # ,look into increasing storage to solve\n print('the data set will contain both version of variables')\n nscalables = [s + '_scld' for s in scalables]\n print(nscalables)\n # nrm.fit(scldf)\n scldf = pd.DataFrame(scaler.fit_transform(scldf), columns=nscalables, index=merged.index.values.tolist())\n print(scldf)\n # scldf.to_excel('_DeepSolar/DeepSolar_Model17Gscld.xlsx', index=False)\n # scaled_merged = nrm.transform(scldf, headers=scldf.columns.values.tolist())\n merged = merged.join(scldf, lsuffix='', rsuffix='_scld')\n print('shape: ', merged.shape)\n print(merged)\n # print(merged)\n return merged\n\ndef save_new_data(merged, scale_sub, newname, newreport, ):\n from _products.utility_fnc import today_is, report_var_stats\n if scale_sub:\n mdf = report_var_stats(merged,\n name=r'__Data/__DeepSolar/Feb/Mixed/DeepSolar_Model_var_stats{}_scld.xlsx'.format(\n today_is()))\n # merged.to_excel('__Data/__Mixed_models/December/DeepSolar_Model_{}_scld.xlsx'.format(today_is()), index=False) # create excel version\n merged.to_csv('__Data/__DeepSolar/Feb/Mixed/DeepSolar_Model_FEB{}_scld.csv'.format(today_is()),\n index=False) # create csv version\n else:\n mdf = report_var_stats(merged, name=r'__Data/__Mixed_models/December/DeepSolar_Model_var_stats{}.xlsx'.format(\n today_is()))\n # save the new data set as an excel and csv file\n # merged.to_excel('__Data/__DeepSolar/Feb/Mixed/DeepSolar_Model_{}.xlsx'.format(today_is()), index=False) # create excel version\n merged.to_csv('__Data/__DeepSolar/Feb/Mixed/DeepSolar_Model_Feb12_scld{}.csv'.format(today_is()),\n index=False) # create csv version\n\nclass DATA_SETS:\n def __init__(self,):\n self.DS_PATH = r'__Data/__DeepSolar/deepsolar_tract_orig_Adoption.xlsx'\n self.NREL_PATH = r'__Data/__NREL/NREL_seeds.xlsx'\n self.SVI_PATH7 = r'__Data/__SVI/SVI_SE_7.xlsx'\n self.SVI_PATH13 = r'__Data/__SVI/SVI_12.xlsx'\n def get_model(self, model):\n if model.upper() == 'DS':\n return self.DS_PATH\n elif model.upper() == 'NREL':\n return self.NREL_PATH\n elif model.upper() == 'SVI7':\n return self.SVI_PATH7\n elif model.upper() == 'SVI13':\n return self.SVI_PATH13\n else:\n print(\"Error: unknown data set type {}\".format(model))\n print('Calling method get_model(), DATA_SETS class')\n quit(-1576)\n\n\nmodel_7st_2_2 = '__Data/____Training/DeepSolar_Model_Feb7_scld.xlsx'\n\n\nmodel_12st_init = '__Data/____Training/DeepSolar_Model_Feb13_adopt.xlsx'\n\n\n" }, { "alpha_fraction": 0.876288652420044, "alphanum_fraction": 0.876288652420044, "avg_line_length": 47.5, "blob_id": "fa29957e052e0ac678c47f57d91133f32e63af02", "content_id": "bf80db0ba0a65bdcda05e2ef75582f119c994add", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 
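# --- Illustrative aside (not part of the original module) -------------------
# `scale_merged` above min-max scales the continuous columns and stores them
# under a '_scld' suffix (optionally replacing the raw columns).  The same
# pattern in miniature, with hypothetical data:
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

_df = pd.DataFrame({'population_density': [10.0, 55.0, 100.0],
                    'land_area': [3.0, 1.0, 2.0]})
_scaled = pd.DataFrame(MinMaxScaler().fit_transform(_df),
                       columns=[c + '_scld' for c in _df.columns],
                       index=_df.index)
_df = _df.join(_scaled)   # keeps both the raw and the 0-1 scaled versions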
54, "num_lines": 2, "path": "/ML_imports_features.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "from sklearn.preprocessing import OneHotEncoder as ohe\nfrom sklearn.decomposition import FastICA\n" }, { "alpha_fraction": 0.7987679839134216, "alphanum_fraction": 0.8008213639259338, "avg_line_length": 59.75, "blob_id": "19d03805cfcbbc630661b842d4e062fe10ba5014", "content_id": "4830c73a391629cf5c69f65842863c19039ae1fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 487, "license_type": "no_license", "max_line_length": 132, "num_lines": 8, "path": "/README.md", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "# DeepSolar_Code_Base\nThe code used to analyze the Deep Solar, NREL seeds ii, and NREL data sets for the purposes of analyzing residential solar adoption.\nThe modules contain methods for things such as:\n* combining the 3 data sets in various ways\n* running various unique, sklearn, and statsmodels machine learning and data analysis tools on the data\n* visualization tools\n* various utility functions for:\n * dictionary, dataframe, and list manipulation, mathmatical calculations etc. \n" }, { "alpha_fraction": 0.8620689511299133, "alphanum_fraction": 0.8620689511299133, "avg_line_length": 57.33333206176758, "blob_id": "5c38272fa5055c699bcd4ed9c31fead22846c41f", "content_id": "156954b7c0ecaf1388f1e699e9618e893cc71f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 83, "num_lines": 3, "path": "/ML_imports_basic.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "from sklearn import metrics\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler" }, { "alpha_fraction": 0.4724179208278656, "alphanum_fraction": 0.483632892370224, "avg_line_length": 40.80561065673828, "blob_id": "b018ddea0de628174c330b2b1a136cdfa5397b96", "content_id": "1d437a9e57c2860ebbd0b7ed16e92f045fad0083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20865, "license_type": "no_license", "max_line_length": 134, "num_lines": 499, "path": "/visualization_tools.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "import matplotlib\nimport matplotlib.image as mpimg\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\npd.options.mode.use_inf_as_na = True\nfrom _products.utility_fnc import *\n\ndef gaussian_plot(xarrays, mus, stds, priors=[1,1], verbose=False):\n for xarray, mu, std, prior in zip(xarrays, mus, stds, priors):\n Visualizer().basic_plot(xarray, generate_gaussian(xarray, mu, std, prior, verbose), xlabel='x', ylabel='prob',\n title='test gaussian', show=False, fig_num=1, m_label=[['a'], ['b']], legend=True)\n plt.show()\n\nclass Visualizer:\n \"\"\" a lot of visualization methods\n There are:\n ploting methods:\n * dict_bar_plotter(): uses a dict to make a bar plot\n *\n stdout put methods\n * print_test_params: takes a dictionary of paramter names and values and prints them to stdout\n \"\"\"\n def print_test_params(self, param_d):\n print('Test Parameters:')\n for p in param_d:\n print(' * {0}{1}'.format(p, param_d[p]))\n return\n\n def dict_bar_plotter(self, bar_dict, xlabel='Number of Hidden Neurons', ylabel='Time to train seconds',\n title='Time 
to Complete for different Hidden neurons', save_fig=False, fig_name=''):\n y_pos = np.arange(len(bar_dict))\n bar_dict = sort_dict(bar_dict)\n performance = bar_dict.values()\n lables = list(bar_dict.keys())\n\n plt.barh(y_pos, performance, align='center', alpha=0.5)\n plt.yticks(y_pos, lables)\n plt.xlabel(ylabel)\n plt.ylabel('Number of hidden Neurons')\n plt.title(title)\n if save_fig:\n plt.savefig(fig_name)\n plt.show()\n\n def plot_confusion_matrix(self, y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n # classes = classes[unique_labels(y_true, y_pred)]\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n specificity = cm[0][0] / (cm[0][0] + cm[0][1])\n sensitivity = cm[1][1] / (cm[1][0] + cm[1][1])\n overall_acc = (cm[1][1] + cm[0][0]) / (cm[1][0] + cm[1][1] + cm[0][0] + cm[0][1])\n precision = (cm[0][0] / (cm[0][0] + cm[1][0]))\n print('Accuracy: {:.3f}'.format(overall_acc))\n print('Recall: {:.3f}'.format(sensitivity))\n print('Specificity: {:.3f}'.format(specificity))\n print('Precision: {:.3f}'.format(precision))\n title = 'Accuracy: {:.3f}\\nrecall: {:.3f}\\nprecision: {:.3f}\\nspecificity: {:.3f}'.format(overall_acc,\n sensitivity,\n precision,\n specificity)\n rd = {'Accuracy':overall_acc, 'Sensitivity':sensitivity,\n 'Precision':precision, 'Specificity':specificity, 'CM':cm}\n fig, ax = plt.subplots()\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
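# --- Illustrative aside (not part of the original module) -------------------
# For reference alongside plot_confusion_matrix above: the textbook definitions
# of these rates, computed from sklearn's [[tn, fp], [fn, tp]] confusion-matrix
# layout, using made-up counts:
import numpy as np

_cm = np.array([[50, 10],   # actual negatives: 50 correct, 10 false alarms
                [5, 35]])   # actual positives: 5 misses, 35 hits
_tn, _fp, _fn, _tp = _cm.ravel()
_accuracy = (_tp + _tn) / _cm.sum()     # 0.85
_recall = _tp / (_tp + _fn)             # sensitivity, 0.875
_specificity = _tn / (_tn + _fp)        # ~0.833
_precision = _tp / (_tp + _fp)          # ~0.778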
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes,\n title=title,\n ylabel='True label',\n xlabel='Predicted label')\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\")\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n rd['ax'] = ax\n return rd\n\n def basic_plot(self, x, y, xlabel='xlabel', ylabel='ylabel', title='K value vs accuracy',\n marker='x', show=False, fig_num=None, m_label=[''], legend=False):\n # artis for this plot\n art = None\n if fig_num is None:\n plt.figure()\n elif fig_num == 'ignore':\n pass\n else:\n plt.figure(fig_num)\n art = plt.plot(x,y,marker)\n #plt.scatter(x,y,color=color, marker=marker,label=m_label)\n if legend:\n plt.legend([m_label])\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n if show:\n plt.show()\n return art[0]\n\n def basic_plot_scatter(self, x, y, color='r', xlabel='xlabel', ylabel='ylabel', title='K value vs accuracy',\n marker='x', show=False, fig_num=None, m_label=''):\n if fig_num is None:\n plt.figure()\n elif fig_num == 'ignore':\n pass\n else:\n plt.figure(fig_num)\n #plt.plot(x,y,color=color, marker=marker,label=m_label)\n plt.scatter(x,y,color=color, marker=marker,label=m_label)\n lgd = plt.legend(loc='best')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n if show:\n plt.show()\n\n def sub_plotter(self, xarray, yarray, xlabels, ylabels, titles, markers, sharex='none', sharey='none', show=False,\n fig_num=None, orientation='v'):\n # set up the subplot arrays using the\n # length of xarray\n num_plots = len(xarray)\n if orientation == 'v':\n fig, axs = plt.subplots(nrows=num_plots, ncols=1, sharex=sharex, sharey=sharey)\n else:\n fig, axs = plt.subplots(nrows=1, ncols=num_plots, sharex=sharex, sharey=sharey)\n\n\n for i in range(num_plots):\n axs[i].plot(xarray[i], yarray[i])\n axs[i].set_xlabel(xlabel=xlabels[i])\n axs[i].set_ylabel(ylabel=ylabels[i])\n axs[i].set_title(titles[i])\n if show:\n plt.show()\n\n def multi_plot(self, xarray, yarray, xlabel='x label', ylabel='y label',\n title='MULTIPLOT TITLE', fig_num=None, legend_array=['me','you'], marker_array=['x', 'x'], show=False,\n show_last=False, save=False, fig_name='Fig'):\n found = False\n l = len(xarray)\n cnt = 0\n arts = list()\n for x, y, m, la in zip(xarray, yarray, marker_array, legend_array):\n if fig_num is None and not found:\n fig_num = plt.figure().number\n #print('Fig num',fig_num)\n if show_last:\n if cnt < l-1:\n a = self.basic_plot(x=x, y=y, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=[la], marker=m, show=False)\n arts.append(a)\n else:\n a = self.basic_plot(x=x, y=y, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=legend_array, marker=m, show=True, legend=True)\n arts.append(a)\n cnt += 1\n else:\n a = self.basic_plot(x=x, y=y, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=[la], marker=m, show=False)\n arts.append(a)\n lgd = plt.legend(arts, legend_array, loc='best')\n if save:\n plt.savefig(fig_name)\n plt.show()\n return fig_num\n\n\n def multi_plot_scatter(self, xarray, yarray, color_array=['r', 'b'], xlabel='x label', ylabel='y label',\n title='MULTIPLOT 
TITLE', fig_num=None, legend_array=['me','you'], marker_array=['x', 'x'], show=False,\n show_last=False):\n found = False\n l = len(xarray)\n cnt = 0\n for x, y, c, la, m in zip(xarray, yarray, color_array, legend_array, marker_array):\n if fig_num is None and not found:\n fig_num = plt.figure().number\n #print('Fig num',fig_num)\n if show_last:\n if cnt < l-1:\n self.basic_plot_scatter(x=x, y=y, color=c, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=la, marker=m, show=False)\n else:\n self.basic_plot_scatter(x=x, y=y, color=c, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=la, marker=m, show=True)\n cnt += 1\n else:\n self.basic_plot_scatter(x=x, y=y, color=c, xlabel=xlabel, ylabel=ylabel, title=title, fig_num=fig_num,\n m_label=la, marker=m, show=show)\n return fig_num\n\n\n def bi_class_colored_scatter(self, x, y, class_dict, fig_num=None, legend=['class 0', 'class 1'], annotate=False, show=True,\n xl='x', yl='y', title='title'):\n for X, Y in zip(x,y):\n plt.scatter(X[0], X[1], c=class_dict[Y])\n plt.title(title)\n plt.xlabel(xl)\n plt.ylabel(yl)\n leg = plt.legend(legend, loc='best', borderpad=0.3, shadow=False, markerscale=0.4)\n leg.get_frame().set_alpha(0.4)\n if show:\n plt.show()\n\n\n def bi_class_scatter3D(self, x, y, class_dict, fig_num=None, legend=['class 0', 'class 1'], annotate=False, show=True, treD=False,\n xl = 'x', yl='y', zl='z', cols=(0, 1, 2), title='3D Class Scatter'):\n\n a = cols[0]\n b = cols[1]\n c = cols[2]\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n for X, Y in zip(x, y):\n ax.scatter(X[a], X[b], X[c], c=class_dict[Y])\n plt.legend(['non adopters', 'adopters'])\n ax.set_xlabel(xl)\n ax.set_ylabel(yl)\n ax.set_zlabel(zl)\n plt.title(title)\n plt.show()\n\n def fancy_scatter_plot(self, x, y, styl, title, c, xlabel, ylabel, labels, legend,\n annotate=True, s=.5, show=False):\n\n for z1, z2, label in zip(x, y, labels):\n plt.scatter(z1, z2, s=s, c=c)\n if annotate:\n plt.annotate(label, (z1, z2))\n\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n leg = plt.legend([legend], loc='best', borderpad=0.3,\n shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),\n markerscale=0.4)\n leg.get_frame().set_alpha(0.4)\n leg.draggable(state=True)\n\n if show:\n plt.show()\n\n def make_prop_o_var_plot(self, s, num_obs, threshold=.95, show_it=True, last_plot=True):\n\n sum_s = sum(s.tolist())\n\n ss = s ** 2\n\n sum_ss = sum(ss.tolist())\n\n prop_list = list()\n\n found = False\n\n k = 0\n\n x1, y1, x2, y2, = 0, 0, 0, 0\n p_l, i_l = 0, 0\n found = False\n\n for i in range(1, num_obs + 1):\n perct = sum(ss[0:i]) / sum_ss\n # perct = sum(s[0:i]) / sum_s\n\n if np.around((perct * 100), 0) >= threshold*100 and not found:\n y2 = perct\n x2 = i\n x1 = i_l\n y1 = p_l\n found = True\n prop_list.append(perct)\n i_l = i\n p_l = perct\n\n if np.around(y2, 2) == .90:\n k_val = x2\n else:\n print('it is over 90%', x2)\n #vk_val = line_calc_x(x1, y1, x2, np.around(y2, 2), .9)\n\n single_vals = np.arange(1, num_obs + 1)\n\n if show_it:\n fig = plt.figure(figsize=(8, 5))\n plt.plot(single_vals, prop_list, 'ro-', linewidth=2)\n plt.title('Proportion of Variance, K should be {:d}'.format(x2))\n plt.xlabel('Eigenvectors')\n plt.ylabel('Prop. of var.')\n\n p90 = prop_list.index(y2)\n\n # plt.plot(k_val, prop_list[p90], 'bo')\n plt.plot(x2, prop_list[p90], 'bo')\n\n leg = plt.legend(['Eigenvectors vs. Prop. 
of Var.', '90% >= variance'],\n loc='best', borderpad=0.3,shadow=False, markerscale=0.4)\n leg.get_frame().set_alpha(0.4)\n #leg.draggable(state=True)\n\n if last_plot:\n plt.show()\n\n return x2\n\n\n def Groc(self, tpr, tnr):\n self.basic_plot(1-tnr, tpr)\n\n def gaussian_plot(self, xarrays, mus, stds, priors=[1, 1], verbose=False):\n for xarray, mu, std, prior in zip(xarrays, mus, stds, priors):\n Visualizer().basic_plot(xarray, generate_gaussian(xarray, mu, std, prior, verbose), xlabel='x',\n ylabel='prob',\n title='test gaussian', show=False, fig_num=1, m_label=[['a'], ['b']], legend=True)\n plt.show()\n # ================================================================================\n # ================================================================================\n # ====== std out methods ==============\n # ================================================================================\n # ================================================================================\n def string_padder(self,str='What Up Yo!', pstr=' ', addstr='Just Added', padl=20, right=True):\n if right:\n return str + '{:{}>{}s}'.format(addstr, pstr, padl)\n return str + '{:{}<{}s}'.format(addstr, pstr, padl)\n\n def border_maker(self, item, bsize=35):\n rs = ''\n for i in range(bsize):\n rs += item\n return rs\n\n def border_printer(self, border, padl=2):\n for i in range(padl):\n print(border)\n\n def create_label_string(self, label, border, lpad=4, lpstr=' ', b_size=35):\n # calculate border left over\n rpd = self.border_maker(lpstr, lpad)\n label = rpd + label + rpd\n b_left_over = b_size - len(label)\n if b_left_over%2 == 0:\n bleft = int(b_left_over/2)\n bright = int(b_left_over/2)\n else:\n bleft = int(np.around((b_left_over/2), 0))-1\n bright = int(np.around(b_left_over/2, 0))\n\n #return self.string_padder(str=border[0:bleft-(len(label))], pstr=lpstr, addstr=label, padl=lpad,\n return border[0:bleft] + label + border[0:bright]\n\n def block_label(self, label, lpad=4, lpstr=' ', border_marker=None, border_size=35, bpadl=2):\n if border_marker is not None:\n border =self.border_maker(border_marker, bsize=border_size)\n self.border_printer(border, padl=bpadl)\n else:\n border = self.border_maker('=', bsize=border_size)\n self.border_printer(border, padl=bpadl)\n\n print(self.create_label_string(label, border, lpad=lpad, lpstr=lpstr, b_size=border_size))\n\n if border_marker is not None:\n self.border_printer(self.border_maker(border_marker, bsize=border_size), padl=bpadl)\n else:\n self.border_printer(self.border_maker('=', bsize=border_size), padl=bpadl)\n\n def display_significance(self, feature_sig, features, verbose=False):\n \"\"\"Takes \"\"\"\n rd = {}\n for s, f in zip(feature_sig, features):\n rd[f] = s\n\n sorted_rd = dict(sorted(rd.items(), key=operator.itemgetter(1), reverse=True))\n if verbose:\n display_dic(sorted_rd)\n return sorted_rd\n\n def show_performance(self, scores, verbose=False, retpre=False):\n \"\"\"displays a confusion matrix on std out\"\"\"\n true_sum = scores['tp'] + scores['tn']\n false_sum = scores['fp'] + scores['fn']\n sum = true_sum + false_sum\n\n # do this so we don't divde by zero\n tpfp = max(scores['tp']+scores['fp'], .00000001)\n tpfn = max(scores['tp']+scores['fn'], .00000001)\n precision = scores['tp']/tpfp\n recall = scores['tp']/tpfn\n accuracy = true_sum / sum\n # probability ot a true positive\n sensitivity = scores['tp'] / (scores['tp'] + scores['fn'])\n # probability ot a true negative\n specificity = scores['tn'] / (scores['tn'] + scores['fp'])\n if 
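# --- Illustrative aside (not part of the original module) -------------------
# make_prop_o_var_plot above squares the singular values and looks for the
# smallest K whose cumulative share of variance clears the threshold.  The same
# computation without the plotting, on made-up singular values:
import numpy as np

_s = np.array([5.0, 3.0, 1.5, 0.5, 0.1])
_share = (_s ** 2) / np.sum(_s ** 2)
_cum = np.cumsum(_share)
_k = int(np.argmax(_cum >= 0.95)) + 1   # first K whose cumulative share >= 95%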
verbose:\n print('=====================================================')\n print('=====================================================')\n print(' | predicted pos | predicted neg |')\n print('----------------------------------------------------')\n print(' actual pos | {:d} | {: 3d} |'.format(scores['tp'], scores['fn']))\n print('----------------------------------------------------')\n print(' actual neg | {:d} | {:d} |'.format(scores['fp'], scores['tn']))\n print('-------------------------------------------------------------------')\n print(' Correct | {:d}'.format(true_sum))\n print(' Total | % {:d}'.format(sum))\n print(' | ------------------------')\n print(' Accuracy | {:.2f}'.format(accuracy))\n print(' Precision | {:.2f}'.format(precision))\n #print(' recall | {:.2f}'.format(recall))\n print(' Sensitivity | {:.2f}'.format(sensitivity))\n print(' Specificity | {:.2f}'.format(specificity))\n print('=======================================================================================')\n if retpre:\n return accuracy, sum, sensitivity, specificity, precision\n\n return accuracy, sum, sensitivity, specificity\n\n\n def show_image(self, filename):\n \"\"\"\n Can be used to display images to the screen\n :param filename:\n :return:\n \"\"\"\n img = mpimg.imread(filename)\n plt.imshow(img)\n plt.show()\n\n def display_DT(self, estimator, features, classes, newimg='tree.png', tmpimg='tree.dot', precision=2):\n from sklearn.tree import export_graphviz\n import io\n import pydotplus\n #graph = Source(export_graphviz(estimator, out_file=None\n # , feature_names=features, class_names=['0', '1']\n # , filled=True))\n #display(SVG(graph.pipe(format='svg')))\n # plot_tree(estimator, filled=True)\n # plt.show()\n # return\n\n # Export as dot file\n export_graphviz(estimator, out_file=tmpimg,\n feature_names=features,\n class_names=classes,\n rounded=True, proportion=False,\n precision=3, filled=True)\n #from subprocess import call\n #call(['dot', '-Tpng', tmpimg, '-o', newimg, '-Gdpi=600'])\n # os.system('dot -Tpng {} -o {}, -Gdpi=600'.format(tmpimg, newimg))\n # Display in python\n #import matplotlib.pyplot as plt\n\n # Draw graph\n #graph = graphviz.Source(dot_data)\n #dotfile = io.StringIO()\n graph = pydotplus.graph_from_dot_file(tmpimg)\n graph.write_png(newimg)\n print(graph)\n\n # Convert to png using system command (requires Graphviz)\n\n # plt.figure(figsize=(14, 18))\n # plt.imshow(plt.imread(newimg))\n # plt.axis('off')\n # plt.show()\n\n #from subprocess import call\n #os.system('dot -Tpng tmpimg -o newimg, -Gdpi=600')\n #self.show_image(newimg)\n\n\n\n\n" }, { "alpha_fraction": 0.6767123341560364, "alphanum_fraction": 0.6767123341560364, "avg_line_length": 54.30303192138672, "blob_id": "3b691d979c3e1a9b3db35dc7ac4fe0bf5943cddd", "content_id": "4e8b9ec5ded5ab1a68ba88cd99156039c5b9ca6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1825, "license_type": "no_license", "max_line_length": 105, "num_lines": 33, "path": "/Model_Builders/SVI_ModelStacker.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "\"\"\" This file will load a set of given SVI csv files and stack them into one big file\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\n\n\nAL_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Alabama.csv')\nAZ_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Arizona.csv')\nCA_df = 
pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\California.csv')\nGA_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Georgia.csv')\nKY_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Kentucky.csv')\nMA_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Massachusetts.csv')\nMS_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Mississippi.csv')\nNY_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\NewYork.csv')\nNC_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\NorthCarolina.csv')\nTN_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Tennessee.csv')\nTX_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Texas.csv')\nUT_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Utah.csv')\nVA_df = pd.read_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI\\Virginia.csv')\ncolsmn = AL_df.columns.values.tolist()\n\nnew_df = AL_df.values\n\nstates = [AL_df, AZ_df, CA_df, GA_df, KY_df, MA_df, MS_df, NY_df, NC_df, TN_df, TX_df, UT_df, VA_df,]\nstates = [AZ_df, CA_df, GA_df, KY_df, MA_df, MS_df, NY_df, NC_df, TN_df, TX_df, UT_df, VA_df,]\n\"\"\" NOW STACK AND SAVE THEM \"\"\"\nfor st in states:\n new_df = np.vstack((new_df, st.values))\n\nnew_df = pd.DataFrame(new_df, columns=colsmn)\nnew_df.to_csv(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI/SVI_.csv')\n# new_df.to_excel(r'C:\\Users\\gjone\\DeepSolar_Code_Base\\__Data\\__SVI/SVI_.xlsx')\n" }, { "alpha_fraction": 0.8793103694915771, "alphanum_fraction": 0.8793103694915771, "avg_line_length": 57, "blob_id": "6789887bf984b519b179cf64a83a2b273f8368c4", "content_id": "b6b36302865b3ffca74a1985f426e0809bdc7ae1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 116, "license_type": "no_license", "max_line_length": 58, "num_lines": 2, "path": "/ML_imports_RF_TR.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "from sklearn.ensemble import RandomForestClassifier as RFC\nfrom sklearn.ensemble import ExtraTreesClassifier as ETC\n" }, { "alpha_fraction": 0.45185166597366333, "alphanum_fraction": 0.46302589774131775, "avg_line_length": 44.26305389404297, "blob_id": "21b38aed22d33e0abf7bf780d8247c42b1fce596", "content_id": "756b9c5be60a9ead303be047bbb0ac6eb1a56ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160369, "license_type": "no_license", "max_line_length": 204, "num_lines": 3543, "path": "/ML_Tools.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nfrom math import *\nimport sys\nimport matplotlib.pyplot as plt\nfrom _products.visualization_tools import *\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split\nfrom _products.utility_fnc import *\nfrom sklearn import metrics\n\n\nviz = Visualizer()\n\n# =========================================================================\n# =========================================================================\n# TODO:Feature Selection and Preprocessing tools\n# =========================================================================\n# =========================================================================\n\ndef LCN_transform(df_base, target='Adoption', mtc=0, 
mxcc=1, corrs=('kendall', 'pearson'), inplace=False, verbose=False):\n \"\"\" This will peform a LCN search and either return a reduced data frame\n or reduce the given\n :param df: data frame\n :param target: the target('s) of the analysis\n :param mtc: minimum target correlation\n :param mxcc: maximum cross correlation between independent variables\n :param corrs: types of correlation matrices to create\n :param inplace: if true will modify original, otherwise returns a reduced version\n :return:\n \"\"\"\n corr_dfk = df_base.corr(method=corrs[0])\n corr_df = df_base.corr(method=corrs[1])\n corr_df[target] = corr_dfk[target].values.flatten()\n corr_df.loc[target, :] = corr_dfk.loc[target,:]\n\n dfattribs = list(df_base.columns.values.tolist()).copy()\n del dfattribs[dfattribs.index(target)]\n print(dfattribs)\n\n lcn_d = LCN(corr_df, target=target)\n\n rl = HCTLCCL(lcn_d, [], target=target, options=dfattribs, target_corr_lim=mtc,\n cross_corr_lim=mxcc)\n\n if verbose:\n print()\n print('return features:')\n print(rl)\n print()\n\n return df_base.loc[:, rl], df_base.loc[:, [target]]\n\n\n\n# sorts a correlation and\ndef LCN(corr_M, threshold=100, target='Adoption'):\n \"\"\"Takes a correlation matrix some threshold value and the name of the target column.\n This method will use the given correlation matrix to create a dictionary for all features\n in the matrix where the keys are the features and the values are a dictionary of all other\n features as keys and vals are the key features correlations to those variables....\n dict = {'feat1': {'feat2: correl_feat1_feat2}}\n result is a dictionary keyed on the features, with values of a sorted dictionary keyed on other\n features sorted on correlation\n :param corr_M: correlation matrix\n :param threshold: TODO: I don't remeber exactly what this does\n :param target:\n :return:\n \"\"\"\n # go through Data frame of correlations grabbing the list and sorting from lowest to\n # grab the attribs\n attribs = corr_M.columns.values.tolist()\n lv1_d = {}\n for ata in attribs:\n lv1_d[ata] = dict()\n for atb in attribs:\n if ata != atb and atb != target:\n if corr_M.loc[ata, atb] < threshold:\n lv1_d[ata][atb] = abs(corr_M.loc[ata, atb])\n if ata == target:\n lv1_d[ata] = sort_dict(lv1_d[ata], reverse=True)\n else:\n lv1_d[ata] = sort_dict(lv1_d[ata], reverse=False)\n\n return lv1_d\ndef HCTLCCL(corr_dic, start_vars, target, options, target_corr_lim = .09, cross_corr_lim=.55):\n rl = list(start_vars)\n for p_var in corr_dic[target]:\n if corr_dic[target][p_var] > target_corr_lim and p_var not in rl and p_var in options:\n rl = check_correlation(p_var, corr_dic, rl, '', cross_corr_lim)\n if corr_dic[target][p_var] < target_corr_lim:\n return rl\n return rl\ndef check_correlation(check_var, corr_dic, cl, used, cross_corr_lim = .55):\n # go through current list\n for variable in cl:\n # checking cross correlation between the possible\n # variable to be added and the current one from\n # the current list if it surpasses the threshold\n # return the current list as is\n if corr_dic[check_var][variable] > cross_corr_lim:\n return cl\n # if correlations with all current variables\n # are within limits return the current list\n # updated with the new value\n return cl + [check_var]\ndef forward_selector_test(x, y, x2, y2):\n pass\n# =========================================================================\n# =========================================================================\n# TODO: Statistics and Preprocessing\n# 
=========================================================================\n# =========================================================================\nclass NORML():\n \"\"\"a data normalizer. has two normalization methods\n 1) min max normalization:\n * equation : (X-Min_val)/(Max_val - Min_val)\n * rescales the data to [0,1] values\n * centers pdf on the mean\n 2) z standardization (X-Min_val)/(Max_val - Min_val)\n * equation : (X-Mean)/(Standard_Deviation)\n * rescales the data to [-1,1] values\n * centers pdf on 0 with std = 1\n Select the type by setting the input argument\n nrmlz_type to:\n * : minmax for option 1\n * : zstd for option 2\n \"\"\"\n def __init__(self, nrmlz_type='minmax'):\n self.mu=None\n self.std=None\n self.cov=None\n self.corr=None\n self.cov_inv=None\n self.cov_det=None\n self.min = None\n self.max = None\n self.normlz_type=nrmlz_type\n\n def set_type(self, n_type):\n self.normlz_type = n_type\n\n def fit(self, df=None, X=None):\n if type(df) != type(np.array([0])):\n df = pd.DataFrame(df)\n self.process_df(df)\n\n def process_df(self, df):\n self.mu = df.values.mean(axis=0)\n self.std = df.values.std(axis=0)\n self.min = df.min()\n self.max = df.max()\n self.cov = df.cov()\n self.cov_inv = np.linalg.inv(self.cov)\n self.cov_det = np.linalg.det(self.cov)\n\n def transform(self,df, headers=None):\n if type(df) !=type(np.array([0])):\n df = pd.DataFrame(df)\n if self.normlz_type == 'zstd':\n if headers is not None:\n return pd.DataFrame((df - self.mu) / self.std, columns=headers)\n return pd.DataFrame((df - self.mu) / self.std)\n elif self.normlz_type == 'minmax':\n if headers is not None:\n return pd.DataFrame((df - self.min) / (self.max - self.min), columns=headers)\n return pd.DataFrame((df - self.min) / (self.max - self.min))\n\ndef standardize_data(X, X2, scaler_ty='minmax'):\n #scaler_ty = 'std'\n if scaler_ty == 'minmax':\n mm_scaler = MinMaxScaler()\n mm_scaler.fit(X)\n Xtrn = mm_scaler.transform(X)\n Xtsn = mm_scaler.transform(X2)\n elif scaler_ty == 'std':\n std_scaler = StandardScaler()\n std_scaler.fit(X)\n Xtrn = std_scaler.transform(X)\n Xtsn = std_scaler.transform(X2)\n return Xtrn, Xtsn\n\ndef cross_val_splitter(X, y, tr=.5, ts=.5, vl=0, seed=None, verbose=False, target=None):\n train_idx, val_idx, test_idx = split_data(X, y, p_train=tr, p_test=vl, p_val=ts, verbose=verbose,seed=seed)\n\ndef split_data(X, y, p_train=.70, p_test=.30, p_val=.0, priors = None, verbose=False, seed=False, lr=True):\n \"\"\"Returns a randomized set of indices into an array for the purposes of splitting data\"\"\"\n dXY = None\n if type(X) != type(pd.DataFrame([0])):\n nx = pd.DataFrame(X)\n nx[X.shape[1]] = y.values.tolist()\n dXY = nx.values\n np.random.shuffle(dXY)\n N = len(X)\n\n train = int(np.around(N * p_train, 0))\n if p_val == 0:\n test = int(np.around(N * p_test, 0, ))\n val = 0\n else:\n test = N - train\n val = N - train - test\n\n tr = dXY[0:train]\n ts = dXY[train:train+test]\n if p_val != 0:\n vl = dXY[train+test:]\n\n tr_X, tr_y = tr[:][0:len(dXY[0])], tr[:][len(dXY[0])]\n ts_X, ts_y = ts[:][0:len(dXY[0])], ts[:][len(dXY[0])]\n vl_X, vl_y = list(), list()\n if p_val != 0:\n vl_X, vl_y = tr[:][0:len(dXY[0])], tr[:][len(dXY[0])]\n '''\n if priors is not None:\n print('need to set up the distribution of the weights')\n\n if verbose:\n print('train set size: ', train)\n print('test set size: ', test)\n print('val set size: ', val)\n np.random.shuffle(X)\n tr_idx = rc\n\n for i in range(0, train):\n trn_idx.append(r_c[i])\n\n for i in range(train, 
train+test):\n tst_idx.append(r_c[i])\n\n for i in range(train+test, data_size):\n val_idx.append(r_c[i])\n\n if val == 0:\n return trn_idx, tst_idx\n else:\n return trn_idx, tst_idx, val_idx\n '''\n if p_val != 0:\n return (tr_X, tr_y), (ts_X, ts_y), (vl_X, vl_y)\n return (tr_X, tr_y), (ts_X, ts_y), (vl_X, vl_y)\n\ndef gstandardize_data(X, X2, scaler_ty='minmax'):\n if scaler_ty == 'minmax':\n nrml = NORML()\n nrml.fit(X)\n Xr = nrml.transform(X)\n xrts = nrml.transform(X2)\n if scaler_ty == 'zstd':\n nrml = NORML(scaler_ty=scaler_ty)\n nrml.fit(X)\n Xr = nrml.transform(X)\n xrts = nrml.transform(X2)\n return Xr, xrts\n\ndef normalize(X, mu, std, min, max, type='z', copy=False):\n if type == 'z':\n return z_normalize(X, mu, std)\n elif type == 'mm':\n return min_max_normalize(X, min, max)\n\ndef z_normalize(X, mu, std):\n return pd.DataFrame((X - mu)/std, columns=X.columns)\n\ndef min_max_normalize(X, min, max):\n return pd.DataFrame((X - min)/(max - min), columns=X.columns)\n\n\n# =========================================================================\n# =========================================================================\n# TODO: Modeling tools\n# =========================================================================\n# =========================================================================\n#Classification Model\nclass CMODEL():\n \"\"\"a representation of a model for machine learning can in take in multiple data sets and perform\n a column wise merge\n \"\"\"\n def __init__(self, file_list, exclude_list, target, df=None, usecols=None, usecol_list=None, verbose=False, lcn=False,\n labeled=True, joins=('fips', 'fips', 'fips'), impute='drop', nas=(-999, ), drop_joins=False,st_vars=[],\n mtc=.0, mxcc=1, dim_red=None, split_type='tt', tr_ts_vl = (.6, .4, 0), normal =None, complx=False):\n self.normlz = normal\n self.target = target # the current models classification objective\n self.classes = list() # the class values for this model\n self.model_mean = None # the attribute mean values\n self.model_std = None # the attribute std values\n self.model_cov = None\n self.model_cov_det = None\n self.model_cov_inv = None\n self.class_splits = dict() # the data set split by class\n self.class_counts = dict() # a count for each class in the data set\n self.class_priors = dict() # the prior probaility of each class initialized by data set\n self.class_means = dict() # the attribute means for each class\n self.class_std = dict() # the attribute std for each class\n self.class_cov = dict() # the covariance matrix for each class\n self.class_cor = dict()\n self.class_cov_inv = dict() # the covariance matrix inverse for each class\n self.class_cov_det = dict() # the class covariance matrix determinant\n self.attribs = None # the names of the attributes by column\n self.excluded=None # the excluded variables, can be added back as onehot encoded versions\n self.data_set=None # holds the desired data set\n self.og_dataset= None # the merged set before any preprossing\n self.data_corr = None\n self.X = None # data or independent variables\n self.y = None # the target values or dependent variable\n self.Xtr_n=None\n self.Xts_n=None\n self.corr = None # the correlation matrix for the data\n self.Xfld = None # an fld transformed version of the data\n self.Xpca = None # a pca transformed version of the data\n self.dim_red = DimensionReducer() # the models dimension reducer\n self.Xts=None\n self.yts=None\n self.complx=complx\n self.process_files(file_list, exclude_list, target, usecols, usecol_list, 
verbose, labeled, joins,\n impute, nas, drop_joins=drop_joins, lcn=lcn, mtc=mtc, mxcc=mxcc, tr_ts_vl=tr_ts_vl,\n df=df, st_vars=st_vars)\n\n def process_files(self, file_list, exclude_list, target, usecols, usecol_list, verbose, labeled,\n joins=('fips', 'fips', 'fips'), impute='drop', nas=(-999,), drop_joins=False,\n lcn=False, mtc=.1, mxcc=.4, tr_ts_vl=(.6, .4, 0), df=None, to_encode=None, drops=None, st_vars=[]):\n df_list = list([])\n # go through and create and clean up data frames\n # dropping those in the exclude list\n doit=True\n if drops is None:\n tormv = list()\n else:\n tormv = drops\n # If there was a data frame passed\n if df is not None:\n self.og_dataset = df\n self.excluded = tormv\n if to_encode is not None:\n hold_over = df.low[:, to_encode]\n else:\n # loop to set up input do data merger\n for df, ex in zip(file_list, exclude_list):\n print('Data file:', df)\n print('adding to be excluding', ex)\n df_list.append(pd.read_excel(df))\n tormv += ex\n #if doit and len(ex) > 0:\n self.excluded = tormv\n self.og_dataset = data_merger(df_list, joins=joins, verbose=verbose, drop_joins=True, target=target)\n\n #print(self.og_dataset)\n merged = self.og_dataset.drop(columns=tormv, inplace=False)\n self.data_corr = merged.sort_values(by=target, axis='index', ascending=False).corr(method='kendall')\n if usecols is not None:\n merged = merged.loc[:, usecols]\n self.data_corr = merged.corr(method='kendall')\n if lcn:\n self.data_corr = merged.corr(method='kendall')\n lcn_d = LCN(self.data_corr, target=target)\n rl = HCTLCCL(lcn_d, st_vars, target=target, options=merged.columns.values.tolist(), target_corr_lim=mtc,\n cross_corr_lim=mxcc)\n merged = merged.loc[:, rl + [target]]\n print('columns used:')\n print(merged.columns)\n if impute == 'drop':\n for n in nas:\n merged.replace(n, np.nan) # this value is used by the SVI data set to represent missing data\n merged = merged.dropna()\n self.data_set = merged\n self.attribs = merged.columns.values.tolist()\n print(self.attribs)\n del self.attribs[self.attribs.index(self.target)]\n self.X = pd.DataFrame(merged.loc[:, self.attribs], columns=self.attribs)\n self.y = pd.DataFrame(merged.loc[:, self.target], columns=[self.target])\n # TODO: NOW SPLIT THE DATA INTO DESIRED NUMBER OF FOLDS\n targets0 = self.y[target]\n ts = tr_ts_vl[1] + tr_ts_vl[2]\n print('ts size', ts)\n tr = 1 - ts\n # Create training and testing sets for the data\n X_train0, X_test0, y_train0, y_test0 = train_test_split(self.X, targets0, stratify=targets0, test_size=ts,\n train_size=tr, )\n self.train_counts = y_train0.value_counts(normalize=True)\n self.test_counts = y_test0.value_counts(normalize=True)\n self.X, self.y = pd.DataFrame(X_train0, columns=self.attribs), pd.DataFrame(y_train0, columns=[self.target])\n self.Xts, self.yts = pd.DataFrame(X_test0, columns=self.attribs), pd.DataFrame(y_test0, columns=[self.target])\n self.N = self.X.shape[0]\n self.Nts = self.Xts.shape[0]\n self.d = self.X.shape[1]\n self.corr = self.X.corr()\n\n if self.normlz is not None:\n print('Normalize', self.normlz)\n self.X, self.Xts = standardize_data(self.X, self.Xts, scaler_ty=self.normlz)\n self.X = pd.DataFrame(self.X, columns=self.attribs)\n self.Xts = pd.DataFrame(self.Xts, columns=self.attribs)\n self.y.index = self.X.index\n self.yts.index = self.Xts.index\n print('y len', len(self.y.values))\n print('X len', len(self.X.values))\n self.grab_model_stats()\n # TODO: need to a some time move LCN stuff here\n # grab class specific stats\n self.calculate_class_stats()\n 
self.model_data = list((self.X, self.y)) # store data and labels together in lit\n\n\n\n def grab_model_stats(self):\n print(self.X.values)\n print(self.X)\n self.model_mean = self.X.values.mean(axis=0)\n self.model_std = self.X.values.std(axis=0).mean()\n self.model_cov = self.X.cov()\n if self.complx:\n self.model_cov_inv = np.linalg.inv(self.model_cov)\n self.model_cov_det = np.linalg.det(self.model_cov)\n def calculate_class_stats(self):\n self.classes = list(set(self.y[self.target]))\n print('classes', self.classes)\n for c in self.classes:\n self.splits_priors(c)\n self.class_means_std(c)\n if self.complx:\n self.class_cov_inv_det(c)\n self.class_cor[c] = self.class_splits[c].corr()\n def splits_priors(self,c):\n self.class_splits[c] = self.X.loc[self.y[self.target] == c, :]\n self.class_counts[c] = self.class_splits[c].shape[0]\n self.class_priors[c] = self.class_splits[c].shape[0] / self.N\n def class_means_std(self,c):\n self.class_means[c] = self.class_splits[c].values.mean(axis=0)\n self.class_std[c] = self.class_splits[c].values.std(axis=0)\n def class_cov_inv_det(self, c):\n self.class_cov[c] = self.class_splits[c].cov()\n if self.complx:\n self.class_cov_inv[c] = np.linalg.inv(self.class_cov[c].values)\n self.class_cov_det[c] = np.linalg.det(self.class_cov[c].values)\n def show_data_report(self):\n print('=======================================================================================')\n print('Train data size:', self.X.shape[0])\n print('y_train class distribution 0')\n print(self.train_counts)\n print('Test data size:', self.Xts.shape[0])\n print('y_test class distribution 0')\n print(self.test_counts)\n print('=======================================================================================')\n print('Features in Model:')\n print(self.X.columns.values)\n print('Predicting for {}'.format(self.target))\n def Reduce_Dimension(self, dr_type='FLD', pov=None, pc=None):\n if dr_type == 'FLD':\n self.perform_FLD()\n def perform_FLD(self):\n self.dim_red.fld_fit(self)\n self.Xfld = self.dim_red.FLD(self.X)\n self.tsXfld = self.dim_red.FLD(self.X)\n\nclass RMODEL():\n def __init__(self, X=None, Y=None, columns=None, impute=None, verbose=False, trtsspl=(.7, ),\n n_type=None):\n self.X = np.array(X)\n self.Y = np.array(Y)\n if columns is None:\n self.dataframe = pd.DataFrame(self.X)\n else:\n self.dataframe = pd.DataFrame(self.X, columns=columns)\n self.dataframe['target'] = Y\n self.Xtr = None\n self.ytr = None\n self.Xts = None\n self.yts = None\n self.train_test_split = trtsspl\n self.cross_val_split()\n self.columns = columns\n self.impute=impute\n self.verbose=verbose\n self.normalizer = NORML()\n self.n_type=n_type\n if n_type is not None:\n self.normalize()\n\n def cross_val_split(self):\n if self.train_test_split[0] == 0:\n self.Xtr = self.X\n self.Xts = self.X\n self.ytr = self.Y\n self.yts = self.Y\n return\n else:\n np.random.shuffle(np.array(self.X))\n\n\n def normalize(self, n_type='minmax'):\n self.normalizer.set_type(n_type=n_type)\n self.normalizer.fit(self.Xts)\n self.Xtr = self.normalizer.transform(self.X)\n self.Xts = self.normalizer.transform(self.Xts)\n\n\n\n\n# =========================================================================\n# =========================================================================\n# TODO: Dimension Reduction tools\n# =========================================================================\n# =========================================================================\n\nclass DimensionReducer():\n def __init__(self):\n 
self.type=None\n self.class_splits=None\n self.classes=None\n self.class_means=None\n self.data_means=None\n self.data_std = None\n self.class_cov=None\n self.class_cov_inv=None\n self.class_cov_det=None\n self.class_priors=None\n self.class_counts=None\n self.eig_vec = None\n self.eig_vals = None\n self.pval = None\n self.W = None\n self.WT = None\n self.WT2 = None\n self.k=None\n self.k_90 = None\n self.s = None\n self.vh = None\n self.i_l = list()\n self.dr_type = None\n self.y2 = None\n self.x2 = None\n self.x1 = None\n self.y1 = None\n self.N = None\n self.z, self.one = list(), list()\n\n def FLDA(self, df, dftr, y, classes=(0,1), class_label='type'):\n y = pd.DataFrame(y, columns=[class_label])\n c1 = dftr.loc[y[class_label] == classes[0], :]\n n1 = len(c1)\n c2 = dftr.loc[y[class_label] == classes[1], :]\n n2 = len(c2)\n #print('There are {0} negative and {1} positive samples'.format(n1, n2))\n Sw_inv = np.linalg.inv((n1-1)*c1.cov() + (n2-1)*c2.cov())\n #print(Sw_inv.shape)\n #print(c1.mean().shape)\n #w = np.dot(Sw_inv, np.dot((c1.mean() - c2.mean()), (c1.mean()-c2.mean()).transpose()))\n w = np.dot(Sw_inv, (c1.values.mean(axis=0) - c2.values.mean(axis=0)))\n #print('w', w.shape)\n #print('df', df.shape)\n return np.dot(df,w)\n def fld_fit(self, cmodel):\n self.dr_type = 'fld'\n self.attribs = cmodel.attribs\n self.class_splits = cmodel.class_splits\n self.class_counts = cmodel.class_counts\n self.classes = cmodel.classes\n self.class_means = cmodel.class_means\n self.data_means = cmodel.model_mean\n self.data_std = cmodel.model_std\n self.class_cov = cmodel.class_cov\n self.class_cov_inv = cmodel.class_cov_inv\n self.class_cov_det = cmodel.class_cov_det\n self.class_priors = cmodel.class_priors\n self.N=None\n self.kmm=None\n self.Calculate_W_FLD()\n def pca_fit(self, X):\n self.eig_vals, self.eig_vec = np.linalg.eig(X.cov())\n #print('eigvec', self.eig_vec)\n self.N = len(X)\n print('eigvals', self.eig_vals)\n def svd_w_np(self, X):\n u, s, vh = np.linalg.svd(X, full_matrices=False, compute_uv=True)\n self.s = s\n self.vh = vh\n self.N = len(X)\n self.d = X.shape[1]\n return\n def svd_pov(self, s, accuracy=.90, verbose=False, pov_plot=False, show_now=False):\n sum_s = sum(s.tolist())\n ss = s**2\n sum_ss = sum(ss.tolist())\n self.prop_list = list()\n found = False\n k = 0\n x1, y1, x2, y2, = 0, 0, 0, 0\n p_l, i_l = 0, 0\n found = False\n self.prop_list.append(0)\n self.i_l.append(0)\n for i in range(1, len(ss)+1):\n perct = sum(ss[0:i]) / sum_ss\n # perct = sum(s[0:i]) / sum_s\n if np.around(perct, 2) >= accuracy and not found:\n self.x1 = i\n self.y1 = perct\n found = True\n self.prop_list.append(perct)\n self.i_l.append(i)\n self.single_vals = np.arange(1, self.N + 1)\n if pov_plot:\n plt.figure()\n plt.plot(self.i_l, self.prop_list)\n plt.scatter(self.x1, self.y1, c='r', marker='o', label='Point at {:.1f}% accuracy'.format(self.y1*100))\n plt.title('Proportion of Variance vs. 
Number of Eigen Values\\n{:d} required for {:.2f}'.format(self.x1, self.y1*100))\n plt.legend()\n plt.xlabel('Number of Eigen values')\n plt.ylabel('Proportion of Variance')\n if show_now:\n plt.show()\n return self.x1 + 1\n def svd_fit(self,X, vh=None, k=None, get_pov=True, pov_thresh=.90, verbose=False, plot=False, usek=False,\n gen_plot=True, y=None):\n if vh is None:\n self.svd_w_np(X)\n u, s, vh = np.linalg.svd(X, full_matrices=False, compute_uv=True)\n if get_pov:\n print('getting pov')\n self.kmm = self.svd_pov(s, accuracy=pov_thresh, verbose=verbose, pov_plot=plot, show_now=True)\n if usek:\n k = self.kmm\n self.N = len(X)\n self.data_means = X.mean(axis=0).values.flatten()\n #print('data means', self.data_means)\n print('vector shape', vh.shape)\n #vt = np.transpose(self.vh)\n vt = np.transpose(self.vh)\n # grab the first k principle components\n if k is not None:\n self.W = vt[:, 0:k]\n self.k = k\n else:\n self.W = vt[:, :]\n self.k = len(X)\n self.WT = np.transpose(self.W)\n\n # grab the first two principle components\n W2 = vt[:, 0:2]\n W3 = vt[:, 0:3]\n self.WT2 = np.transpose(W2)\n self.WT3 = np.transpose(W3)\n if gen_plot:\n # get 0's and 1's\n for row, adp in zip(self.WT2, y):\n if adp == 0:\n self.z.append(row)\n else:\n self.one.append(row)\n def svd_transform(self, X, treD=False):\n z_array = list()\n z2_array = list()\n z3_array = list()\n for row in X.values:\n #print('row',row)\n #print('data means')\n #print(self.data_means)\n c_x = row - self.data_means\n z_array.append(np.dot(self.WT, c_x))\n z2_array.append(np.dot(self.WT2, c_x))\n if treD:\n z3_array.append(np.dot(self.WT3, c_x))\n Z = np.array(z_array, dtype=np.float)\n Z2 = np.array(z2_array, dtype=np.float)\n if treD:\n Z3 = np.array(z3_array, dtype=np.float)\n return Z, Z2, Z3\n return Z, Z2\n def Calculate_W_FLD(self, ):\n c1 = self.class_splits[0]\n n1 = self.class_counts[0]\n c2 = self.class_splits[1]\n n2 = self.class_counts[1]\n # TODO: evalute below to see if needed or not\n if False and type(c1) != type(pd.DataFrame({0:0})):\n c1 = pd.DataFrame(c1).values\n c2 = pd.DataFrame(c2).values\n Sw_inv = np.linalg.inv((n1 - 1) * c1.cov() + (n2 - 1) * c2.cov())\n self.W_fld = np.dot(Sw_inv, (c1.values.mean(axis=0) - c2.values.mean(axis=0)))\n\n def pca_transform(self, X, p=None):\n if p is None:\n return self.convert_basis(X, self.eig_vec)\n else:\n return self.convert_basis(X, self.eig_vec[0:p])\n\n def FLD(self, X):\n return pd.DataFrame(np.dot(X,self.W_fld))\n\n def PCA(self, df, pov, eig_vec=None, m=None, verbose=False, ret_level=0,\n pov_plot=False, show_now=False):\n # if not given eigen values calculate them based on the\n # desired proportion of variance (pov) covered\n if eig_vec is None:\n #print(df.cov())\n eig_vals, eig_vec = np.linalg.eig(df.cov())\n if m is None:\n pov_l, m = self.calculate_p(eig_vals=eig_vals, pov=pov, verbose=verbose, show_now=show_now,\n pov_plot=pov_plot)\n print('The Number of eigenvectors to cover {:.2f} of the variance is {:d}'.format(100*pov, m))\n else:\n pov_l, cm = self.calculate_p(eig_vals=eig_vals, pov=pov, verbose=verbose, show_now=show_now,\n pov_plot=pov_plot)\n print('The number of eigenvectors is set to {:d}'.format(m))\n #print(eig_vec)\n eig_vec = eig_vec[0:m]\n # now perform transform\n y = self.convert_basis(df, pd.DataFrame(eig_vec))\n if ret_level == 0:\n return y\n elif ret_level == 2:\n return pov_l, m, eig_vec, pd.DataFrame(y)\n elif ret_level == 1:\n return eig_vec, y\n\n def calculate_p(self, pov, eig_vals=None, verbose=False, pov_plot=False, 
show_now=False):\n #print('calculating k')\n # calculate total sum\n if eig_vals is None:\n eig_vals = self.eig_vals\n s_m = sum(eig_vals)\n if verbose:\n print('The sum of the eigen values is {0}'.format(s_m))\n print('The length of the eigen values vector is {0}'.format(len(eig_vals)))\n print('here it is')\n print(eig_vals)\n # now go through to find your required k for the\n # desired Proportion of Variance (pov)\n pov_l, cpov, csum, k, found = [], 0, 0, 0, False\n pov_found = 0\n for v in range(len(eig_vals)):\n csum += np.around(eig_vals[v], 2)\n pov_l.append(np.around(csum/s_m, 2))\n if verbose:\n print('The sum at {:d} is {:.2f}, pov {:.2f}'.format(v, csum, pov_l[v]))\n if pov_l[-1] >= pov and not found:\n k = v+1\n pov_found = pov_l[-1]\n print(k, pov_found)\n found = True\n if pov_plot:\n plt.figure()\n plt.plot(list(range(1, len(eig_vals)+1)), pov_l)\n plt.scatter(k, pov_found, c='r', marker='o', label='Point at {:.1f}% accuracy'.format(pov_found*100))\n plt.title('Proportion of Variance vs. Number of Eigen Values')\n plt.legend()\n plt.xlabel('Number of Eigen values')\n plt.ylabel('Proportion of Variance')\n if show_now:\n plt.show()\n\n self.pov_l = pov_l\n self.k = k\n return pov_l, k\n\n def convert_basis(self, df, new_basis):\n return np.dot(df, new_basis.transpose())\n\n def PCA_Eig(self, X, class_means):\n return 1\n\n def PCA_SVD(self, X):\n return 1\n\n# =========================================================================\n# =========================================================================\n# TODO: Learners\n# =========================================================================\n# =========================================================================\n\ndef epsilon(emax, emin, k, kmax):\n return emax * ((emin/emax)**(min(k, kmax)/kmax))\n\nclass Learner(ABC):\n \"\"\"Template class for a learning machine\"\"\"\n def __init__(self,):\n pass\n def finish_init(self):\n pass\n def fit(self, cmodel):\n pass\n def predict(self, X):\n pass\n def score(self, X, Y):\n pass\n\nclass bayes_classifiers(Learner):\n def __init__(self, cmodel=None, df_list=None, case=1):\n super().__init__()\n self.fit(cmodel=cmodel)\n self.case = case\n if self.case == 1:\n self.func = 'euclid'\n elif self.case == 2:\n self.func = 'mahala'\n if self.case == 1:\n self.func = 'euclid'\n elif self.case == 3:\n self.func = 'quadratic'\n def fit(self, cmodel):\n self.cmodel = cmodel\n\n def bayes_classifier_model_finder(self, dfx, dfy, case=1, func='euclid', scale= .1, verbose=False,\n priors=()):\n #n, p = list(), list()\n #f, b = .10, .90\n\n #while f <= b:\n # n.append(b)\n # p.append(f)\n # f += scale\n # b -= scale\n #back = n[0:]\n #rback = n[0:]\n #rback.reverse()\n #ford = p[0:-1]\n #rford = ford[0:]\n #rford.reverse()\n #pos = ford + rback\n #neg = back + rford\n\n if len(priors) == 0:\n pos, neg = self.generate_priors(scale=scale)\n else:\n if scale is not None:\n pos, neg = self.generate_priors(scale=scale, prior1=priors[0], prior2=priors[1])\n else:\n pos, neg = list([priors[0]]), list([priors[1]])\n #print(pos)\n #print(neg)\n best_acc, best_scr, best_posnegs, scr = 0, None, None, 0\n pr_l1, pr_l2, accuracy , sens, spec= list(), list(), list(), list(), list()\n best_pr = [0, 0]\n for ps, ng in zip(pos, neg):\n pr = [ps, ng]\n #print(pr[0] + pr[1])\n acc, scr, posnegs = self.bayes_classifier_predict_and_score(dfx, dfy, case=case, priors=pr, func=func, verbose=verbose)\n accuracy.append(acc)\n pr_l1.append(pr[0])\n pr_l2.append(pr[1])\n sens.append(posnegs['sen'])\n 
spec.append(posnegs['spe'])\n if acc > best_acc:\n best_acc = acc\n best_scr = scr\n best_posnegs = posnegs\n best_pr[0] = pr[0]\n best_pr[1] = pr[1]\n\n result_dic = {'best_accuracy':best_acc,\n 'best_scores':best_scr,\n 'best_postnegs':best_posnegs,\n 'pr_l1':pr_l1,\n 'pr_l2':pr_l2,\n 'sens':sens,\n 'spec':spec,\n 'accuracy_list':accuracy,\n 'best_priors': best_pr}\n\n #return best_acc, best_scr, best_posnegs, pr_l1, pr_l2, sens, spec, accuracy, best_pr\n return result_dic\n\n def bayes_classifier_model_finderB(self, dfx, dfy, case=1, func='euclid', scale= .1, verbose=False,\n priors=()):\n #n, p = list(), list()\n #f, b = .10, .90\n\n #while f <= b:\n # n.append(b)\n # p.append(f)\n # f += scale\n # b -= scale\n #back = n[0:]\n #rback = n[0:]\n #rback.reverse()\n #ford = p[0:-1]\n #rford = ford[0:]\n #rford.reverse()\n #pos = ford + rback\n #neg = back + rford\n\n if len(priors) == 0:\n pos, neg = self.generate_priors(scale=scale)\n else:\n if scale is not None:\n pos, neg = self.generate_priors(scale=scale, prior1=priors[0], prior2=priors[1])\n else:\n pos, neg = list([priors[0]]), list([priors[1]])\n print(pos)\n print(neg)\n best_acc, best_scr, best_posnegs, scr = 0, None, None, 0\n pr_l1, pr_l2, accuracy , sens, spec= list(), list(), list(), list(), list()\n best_pr = [0, 0]\n for ps, ng in zip(pos, neg):\n pr = [ps, ng]\n #print(pr[0] + pr[1])\n acc, scr, posnegs = self.bayes_classifier_predict_and_scoreB(dfx, dfy, case=case, priors=pr, func=func, verbose=verbose)\n accuracy.append(acc)\n pr_l1.append(pr[0])\n pr_l2.append(pr[1])\n sens.append(posnegs['sen'])\n spec.append(posnegs['spe'])\n if acc > best_acc:\n best_acc = acc\n best_scr = scr\n best_posnegs = posnegs\n best_pr[0] = pr[0]\n best_pr[1] = pr[1]\n\n result_dic = {'best_accuracy': best_acc,\n 'best_scores': best_scr,\n 'best_postnegs': best_posnegs,\n 'pr_l1': pr_l1,\n 'pr_l2': pr_l2,\n 'sens': sens,\n 'spec': spec,\n 'accuracy_list': accuracy,\n 'best_priors': best_pr}\n\n # return best_acc, best_scr, best_posnegs, pr_l1, pr_l2, sens, spec, accuracy, best_pr\n return result_dic\n\n def bayes_classifier_predict(self, dfx, case=1, verbose=False, priors = [1,1], func='euclid'):\n ypred = list()\n case = self.case\n func = self.func\n # figure out the priors situation\n if priors is None:\n prior1 = self.cmodel.class_priors[0]\n prior2 = self.cmodel.class_priors[1]\n else:\n prior1 = priors[0]\n prior2 = priors[1]\n #print('====================================> Priors: ', prior1, prior2)\n\n # figure out what discriminant function to use\n if case == 1:\n func = 'euclid'\n #cov = self.gauss_params['std'] ** 2\n cov = self.cmodel.model_std ** 2\n print('cov', cov)\n elif case == 2:\n func = 'mahala'\n cov = self.cmodel.model_cov\n elif case == 3:\n func = 'quadratic'\n cov = [self.cmodel.class_cov[0], self.cmodel.class_cov[1]]\n\n #mu1 = self.gauss_params['mu_c1']\n #mu1 = self.gauss_params['mu_c1']\n #mu1 = self.Cmu_array[0]\n #mu2 = self.Cmu_array[1]\n mu1 = self.cmodel.class_means[0]\n mu2 = self.cmodel.class_means[1]\n #print(func)\n # make some predictions\n for xi in dfx.values:\n if case != 3:\n pc1 = self.discriminate_function(xi, mu1, cov, prior1, func=func)\n pc2 = self.discriminate_function(xi, mu2, cov, prior2, func=func)\n else:\n #print(func)\n pc1 = self.discriminate_function(xi, mu1, cov[0], prior1, func=func)\n pc2 = self.discriminate_function(xi, mu2, cov[1], prior2, func=func)\n if verbose:\n print('pc1',pc1)\n print('pc2',pc2)\n if pc1 > pc2:\n ypred.append(0)\n else:\n ypred.append(1)\n return ypred\n\n 
def bayes_classifier_predict_and_scoreB(self, dfx, dfy, case=1, priors=[1,1], func='euclid', verbose=False):\n case = self.case\n func = self.func\n ypred = self.bayes_classifier_predict(dfx, case=case, priors=priors, func=func, verbose=verbose)\n return self.bayes_classifier_score(dfy, ypred)\n\n def bayes_classifier_predict_and_score(self, dfx, dfy, case=1, priors=[1,1], func='euclid', verbose=False):\n case = self.case\n func = self.func\n ypred = self.bayes_classifier_predict(dfx, case=case, priors=priors, func=func, verbose=verbose)\n return self.bayes_classifier_score(dfy, ypred)\n\n def bayes_classifier_score(self, yactual, ypred, vals = [0,1]):\n return bi_score(ypred, yactual, vals, classes=vals)\n\n def generate_priors(self, scale, prior1=None, prior2=None):\n if prior1 is not None and prior2 is not None:\n # set up prior1 side\n if prior1 < prior2:\n l1 = list([1])\n l2 = list([.0001])\n while l1[-1] - scale >= prior1:\n l1.append(l1[-1] - scale)\n l2.append(1 - l1[-1])\n if l1[-1] - prior1 < 0:\n l1[-1] = prior1\n l2[-1] = 1 - prior1\n else:\n l1 = list([.0001])\n l2 = list([1])\n while l2[-1] - scale >= prior2:\n l2.append(l2[-1] - scale)\n l1.append(1 - l2[-1])\n if l2[-1] - prior2 < 0:\n l2[-1] = prior2\n l1[-1] = 1 - prior2\n return l1, l2\n\n l1 = list([0.001])\n l2 = list([1])\n while np.around(l1[-1] + scale, 3) < 1:\n l1.append(np.around(l1[-1] + scale, 3))\n l2.append(np.around(1 - l1[-1], 3))\n return l1, l2\n\n def predict(self, X, case=1, num_cls = 2, priors=(1,1)):\n return self.bayes_classifier_predict(X, case=1, verbose=False, priors=priors)\n #if num_cls == 2:\n # return self.generate_predictions_bi(X, case=case)\n\n def score(self, Ya, Yp):\n pass\n\n def dim_reduce(self, type='', attribs=()):\n pass\n\n def euclidian_disc(self, mu, x, cov, prior):\n return (-np.dot(x.transpose(), np.dot(mu, x))/cov**2) + np.log(prior)\n\n def mahalanobis_disc(self, mu, x, cov_inv, prior):\n return -np.dot(x.transpose(), np.dot(mu, x)) + np.log(prior)\n\n def quadratic_disc(self, mu, x, cov_inv, cov_det, prior):\n return -np.dot(x.transpose(), np.dot(mu, x)) + np.log(prior)\n\n def min_euclid(self, mean_ib, xvec, sig1, prior):\n return (np.dot(mean_ib.T, xvec) / sig1) - (np.dot(mean_ib.T, mean_ib) / (sig1 * 2)) + np.log(prior)\n #return -(np.sqrt((np.linalg.norm(xvec-mean_ib))))/(2*sig1) + np.log(prior)\n\n def min_mahalanobis(self,mu,x,siginv,prior):\n return np.dot(mu.T,np.dot(siginv.T, x)) - (.5 * np.dot(mu.T, np.dot(siginv, mu))) + np.log(prior)\n def quadratic_machine(self, x, mu, siginv, detsig, prior):\n return (-.5 * np.dot(x.T, np.dot(siginv, x))) + np.dot(mu.T, np.dot(siginv.T, x)) - (.5*np.dot(mu.T, np.dot(siginv, mu))) - (.5*np.log(detsig))+np.log(prior)\n #return (-.5 * np.dot(x.T, np.dot(siginv, x))) + np.dot(np.dot(siginv, mu).T, x) - (.5*np.dot(mu.T, np.dot(siginv, mu))) - (.5*np.log(detsig))+np.log(prior)\n\n def generate_predictions_bi(self,X, case=1):\n y = list()\n for x in X:\n # get the posterior probability of\n # and set the class as the MPP\n c1 = self.case_picker(X, case, 0)\n c2 = self.case_picker(X, case, 1)\n if c1 > c2:\n y.append(0)\n else:\n y.append(1)\n return y\n\n def case_picker(self, X, case, class_val):\n case = self.case\n if case == 1:\n return self.min_euclid(self.cmodel.class_means[class_val], X, self.cmodel.model_std**2,\n self.cmodel.class_priors[class_val])\n #return self.euclidian_disc(self.cmodel.class_means[class_val], X, self.cmodel.,\n # self.cmodel.class_priors[class_val])\n elif case == 2:\n return 
self.mahalanobis_disc(self.cmodel.class_means[class_val], X, self.cmodel.model_cov_inv,\n self.cmodel.class_priors[class_val])\n elif case == 3:\n return self.quadratic_disc(self.cmodel.class_means[class_val], X, self.cmodel.class_cov_inv[class_val],\n self.cmodel.class_cov_det[class_val], self.cmodel.class_priors[class_val])\n\n def discriminate_function(self, df, mu, cov, prior, func='euclid', verbose=False):\n func = self.func\n if func.lower() == 'euclid':\n #print('euclid')\n if verbose:\n print('X:\\n', df)\n print('mu:\\n', mu)\n print('std:', cov)\n return self.min_euclid(mu, df, cov, prior)\n elif func.lower() == 'mahala':\n return self.min_mahalanobis(mu, df, np.linalg.inv(cov), prior)\n #print('mahala')\n elif func.lower() == 'quadratic':\n #print('quad')\n return self.quadratic_machine(df, mu, np.linalg.inv(cov), np.linalg.det(cov), prior)\n\nclass clusters():\n \"\"\"Represents a group of clusters\"\"\"\n def __init__(self, k, method='kmeans', init='random', df=None, distance_metric='dmin',\n distance_calc='euclid', verbose=True, distance_type='city_block'):\n self.methods = ['kmu', 'wta', 'kohonen']\n self.init_types = ['random', 'random_sample', 'normal']\n self.k = k # desired number of clusters\n self.df = df # the data frame I was given\n self.del_thrsh = .09\n self.dist_type = distance_type\n self.distance_metric = distance_metric # what type of distance metric used\n self.distance_calc = distance_calc # how to calculate the distance\n #self.data = self.df.values # the numpy array of my data\n self.size = df.shape[0] # the number of samples in the data\n self.dimen = df.shape[1] # the number of features\n self.method = method # the clustering method\n print('method:', method)\n self.top_grid = list()\n self.epochs = 0\n self.emax = 1\n self.emin = .0001\n self.kmax =100\n self.time_taken = 0\n self.std = int(np.around(df.values.std(axis=0).mean(),0))\n self.mu = int(np.around(df.values.mean(axis=0).mean(),0))\n print('std ', self.std)\n print('mu ', self.mu)\n self.init = init # the method of initializing clusters\n self.dist_LUT = None\n #if distance_metric in ['dmin', 'dmax']:\n # print('dminmax')\n # self.dist_LUT = self.calculate_distance_LUT(df.values) # generate look up table of distances\n self.my_clusters = None\n self.my_clusters = self.check_method(df.values) # the list of clusters initialized\n if verbose:\n print('There are {:d} clusters to start'.format(self.check_size()))\n\n def set_threshold(self, ):\n if self.distance_metric in ['dmax', 'dmean']:\n return -9999\n elif self.distance_metric == 'dmin':\n return 9999\n\n def perform_dist_test(self, threshold):\n if self.distance_metric == 'dmin':\n pass\n def calculate_needed_dist(self, Apt, Bpts):\n na_row = self.dist_LUT[Apt]\n # if we are looking for dmin (minimum distanct between clusters)\n if self.distance_metric == 'dmin':\n bpt, distance = get_select_min_idx(na_row, Bpts)\n return distance\n # if we are looking for dmin (minimum distanct between clusters)\n elif self.distance_metric == 'dmax':\n bpt, distance = get_select_max_idx(na_row, Bpts)\n return distance\n def update_means(self, cls, data):\n for n in cls:\n if len(cls[n][1]) > 0:\n cls[n][0] = np.array(np.around(data[cls[n][1]].mean(axis=0), 0), dtype=np.int)\n return cls\n def perform_epoch(self):\n threshold = self.set_threshold()\n clusterA, clusterB = 0, 0\n # for each cluster look up the distance between it's inhabitants\n # and all other inhabitants\n for c1 in range(len(self.my_clusters)-1):\n for c2 in range(c1 + 1, 
len(self.my_clusters)):\n # grab the two cluster list of point the cover\n c1_pts = self.my_clusters[c1].inhabitants\n c2_pts = self.my_clusters[c2].inhabitants\n\n #for p1 in self.my_clusters[c1].inhabitants:\n # go through cluster 1's points and look at the distance\n # between each of those, and each of the ones in the\n # current other cluster\n for p1 in c1_pts:\n better, distance, = self.calculate_needed_dist(threshold=threshold, Apt=p1, Bpts = c2_pts)\n\n\n # grab the 2nd clusters points\n #for p2 in self.my_clusters[c2].inhabitants:\n # # compare to current threshold and if it is better\n # if self.dist_LUT[p1][p2] > threshold:\n # pass\n\n # and every other clusters inhabitants\n # based on whether we are looking at d max\n # dmin or dmean keep track of the shortest one\n # and whitch two clusters this involves\n # once done merge the two with min distance and repeat\n # until desired number of clusters is found\n def check_size(self):\n return len(self.my_clusters)\n def merge(self, c1i, c2i, verbose=True):\n \"\"\"Hopefully will merge the two clusters\"\"\"\n c1 = self.my_clusters[c1i]\n c2 = self.my_clusters[c2i]\n # get the average for the new cluster\n self.my_clusters[c1i].value = np.stack(c1.value, c2.value).mean(axis=0)\n c1.inhabitants += c2.inhabitants\n if verbose:\n print('the merged inhabitants are ')\n print(c1.inhabitants)\n quit(-745)\n return\n def calculate_distance_LUT(self, data):\n \"\"\"Will Create a look up table for the distance\n from each point to every other thing\n \"\"\"\n tstart = time.time()\n adj = list(([[0]*self.size]*self.size))\n for row in range(self.size):\n for col in range(self.size):\n if row == col:\n if self.distance_metric == 'dmin':\n adj[row][col] = 9000\n elif self.distance_metric == 'dmax':\n adj[row][col] = -9000\n else:\n if self.dist_type == 'city_block':\n #print('city block')\n adj[row][col] = np.linalg.norm(data[row]-data[col])\n # dc[i2] = np.linalg.norm(self.data[i1] - self.data[i2])\n # rd[i1] = sort_dict(dc)\n #pd.DataFrame(adj[0:len(self.size)/4], dtype=np.int).to_excel('The_LUT.xlsx')\n print('Making the LUT took {}'.format(time.time()-tstart))\n return np.array(adj, dtype=int)\n def calculate_cluster_diffs(self, rl):\n for cls in range(len(rl)-1):\n for cls2 in range(cls+1, len(rl)):\n dis = np.linalg.norm((rl[cls].value - rl[cls2]))\n rl[cls].cluster_dist[cls2] = dis\n rl[cls2].cluster_dist[cls] = dis\n # sort the dictionary of distances by value\n for cls in range(len(rl)):\n rl[cls].cluster_dict = sort_dict(rl[cls].cluster_dict)\n return rl\n def dmin_merge(self):\n pass\n def dmax_merge(self):\n pass\n def dmean_merge(self):\n pass\n def merge_clusters(self):\n if self.distance_metric == 'dmin':\n self.dmin_merge()\n elif self.distance_metric == 'dmax':\n self.dmax_merge()\n elif self.distance_metric == 'dmean':\n self.dmean_merge()\n def epsilon(self, emax, emin, k, kmax):\n return emax * ((emin/emax)**(k/kmax))\n\n def wta_update_cls(self, cmean, X, verbose=False, epsln=.001):\n return cmean + epsln*(X - cmean)\n def wta_init_run(self, data, change_threshold=0.09):\n gaussrndm = get_truncated_normal(sd = self.std, mean=self.mu)\n rl = list()\n cls = dict()\n change_threshold = self.del_thrsh\n epsln = .1\n # initialize the clusters randomly with a\n # gaussian distribution of random numbers\n for l in range(self.k):\n #print(l)\n cls[l] = []\n cls[l].append(get_rounded_int_array(gaussrndm.rvs(3)))\n cls[l].append(list())\n #cls = self.update_means(cls, data)\n tot = 0\n #for c in cls:\n # print(cls[c])\n # 
print(c)\n # tot += len(cls[c][1])\n\n change = True\n tstart = time.time()\n # for each point calculate the distance and as you go keep track of the min\n # at end of loop add self to one with min distance\n # then adjust means and repeat until there or no more changes\n while change:\n change = False\n change_cnt = 0\n # for each sample pixel\n # find its nearest mean and put in its\n # cluster and adjust that cluster\n # mean toward the new point\n for sample in range(len(data)):\n dis = 999999\n best = None\n cnt = 0\n # go through all clusters\n for i in cls:\n # if using dmin or max\n if self.distance_metric in ['dmin', 'dmax']:\n if len(cls[i][1]) == 0:\n cdis = np.linalg.norm(cls[i][0] - data[sample])\n elif self.distance_metric == 'dmin':\n if cnt == 0:\n print('dmin')\n tmpd = 99999\n for pt in cls[i][1]:\n if pt != sample:\n dp = np.linalg.norm(data[sample]-data[pt])\n if dp < tmpd and dp != 9999:\n #print(dp, tmpd)\n tmpd = dp\n best = i\n cdis = tmpd\n #if cnt == 0:\n # print('the min dis is {}'.format(cdis))\n cnt += 1\n elif self.distance_metric == 'dmax':\n tmpd = -99999\n for pt in cls[i][1]:\n dp = np.linalg.norm(data[sample]-data[pt])\n if dp > tmpd:\n tmpd = dp\n cdis = tmpd\n elif self.dist_type == 'city_block':\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n else:\n if len(cls[i][1]) <= 1:\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n elif np.linalg.cond(cls[i][1]) < 1 / sys.float_info.epsilon:\n cov = np.linalg.inv(cls[i][1])\n else:\n cov = pd.DataFrame(data[cls[i][1]]).std(axis=0).mean().values\n cov = cov**2\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov, is_std=True)\n #print(cov)\n if self.epochs < 5:\n print('cov')\n print(cov)\n print(cov.shape)\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov)\n #cdis = mahalanobis_distance(data[sample], cls[i][0])\n cnt += 1\n #cdis = np.linalg.norm((cls[i][0]- data[sample]))\n #print('evaluating {}'.format(cdis))\n if cdis < dis:\n dis = cdis\n best = i\n # if I am already in this cluster\n # keep going\n if sample in cls[best][1]:\n continue\n else:\n change = True\n change_cnt += 1\n # find where the sample was and remove it\n for n in cls:\n if sample in cls[n][1]:\n del cls[n][1][cls[n][1].index(sample)]\n break\n cls[best][1].append(sample)\n # now update the center\n cls[best][0] = self.wta_update_cls(cls[best][0], data[sample], epsln=epsln)\n # once we are done with this run adjust means\n # cls = self.update_means(cls, data)\n # at end of loop see what % of points changed\n # if less than threshold stop\n if self.epochs > 0 and self.epochs%1 == 0:\n #epsln = epsln *.1\n epsln = self.epsilon(emax=.1, emin=.0001, k=self.epochs, kmax=30)\n print('-----------------------------epsilon', epsln)\n if (change_cnt/self.size) < change_threshold:\n change = False\n print('the threshold was hit {}'.format(change_cnt/self.size))\n elif self.epochs%50 == 0:\n print('{0} points changed or {1}%, {2}'.format(change_cnt, (change_cnt/self.size), epsln))\n\n self.epochs += 1\n print('Epoch {:d}, changed {:d}'.format(self.epochs, change_cnt))\n self.time_taken = time.time() - tstart\n return self.rescale_ppm(data, cls)\n\n def kmean_init_run(self, data, change_threshold=.001):\n gaussrndm = get_truncated_normal(sd = self.std, mean=self.mu)\n rl = list()\n cls = dict()\n #change_threshold = self.del_thrsh\n rdch = np.random.choice(range(self.size), self.size, replace=False)\n start = 0\n end = int(self.size/self.k)\n step = int(self.size/self.k)\n #print('step 886', step)\n for l in range(self.k):\n 
#print(l)\n cls[l] = []\n cls[l].append(get_rounded_int_array(gaussrndm.rvs(3)))\n cls[l].append(list())\n for i in range(start, end):\n cls[l][1].append(rdch[i])\n start = end\n end = min(end + step, self.size)\n # initialize the means based on whats in the\n cls = self.update_means(cls, data)\n #tot = 0\n #for c in cls:\n # #print(cls[c])\n # #print(c)\n # tot += len(cls[c][1])\n #print('total', tot)\n change = True\n tstart = time.time()\n # for each point calculate the distance and as you go keep track of the min\n # at end of loop add self to one with min distance\n # then adjust means and repeat until there or no more changes\n self.epochs = 0\n print('Starting the while loop')\n epsln = .1\n while change:\n change = False\n change_cnt = 0\n # perform the epoch\n # for every sample\n for sample in range(len(data)):\n dis = 999999\n best = None\n change_cnt = 0\n # go through current means\n # and find the closest\n for i in cls:\n if self.distance_metric in ['dmin', 'dmax']:\n #print('dmin or max')\n if len(cls[i][1]) == 0:\n cdis = np.linalg.norm(cls[i][0] - data[sample])\n elif self.distance_metric == 'dmin':\n tmpd = 99999\n for pt in cls[i][1]:\n if pt != sample:\n dp = np.linalg.norm(data[sample] - data[pt])\n if dp < tmpd:\n tmpd = dp\n cdis = tmpd\n elif self.distance_metric == 'dmax':\n tmpd = -99999\n for pt in cls[i][1]:\n if pt != sample:\n dp = np.linalg.norm(data[sample] - data[pt])\n if dp > tmpd:\n tmpd = dp\n cdis = tmpd\n elif self.dist_type == 'city_block':\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n else:\n if len(cls[i][1]) <= 1 and np.linalg.cond(cls[i][1]) < 1 / sys.float_info.epsilon:\n cov = np.linalg.inv(cls[i][1])\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov)\n elif len(cls[i][1]) <= 1:\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n else:\n cov = pd.DataFrame(data[cls[i][1]]).std(axis=0).mean()\n cov = cov**2\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov, is_std=True)\n #print(cov)\n if self.epochs < 5:\n print('cov')\n print(cov)\n print(cov.shape)\n #cdis = mahalanobis_distance(data[sample], cls[i][0], cov)\n\n\n if self.epochs < 5:\n print('my mahala in kmean')\n if len(cls[i][1]) <= 1:\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n else:\n cov = pd.DataFrame(data[cls[i][1]]).cov().values\n print(cov)\n if self.epochs < 5:\n print('cov')\n print(cov)\n print(cov.shape)\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov)\n if cdis < dis:\n dis = cdis\n best = i\n # if I am already in the closest cluster\n # stay there and go to next sample\n if sample in cls[best][1]:\n continue\n # otherwise put the sample in it's\n # closest cluster after removing\n # it from its current one then\n # set that a change occurred\n # and keep track of how many\n else:\n change = True\n change_cnt += 1\n # find where the sample was and remove it\n for n in cls:\n if sample in cls[n][1]:\n del cls[n][1][cls[n][1].index(sample)]\n cls[best][1].append(sample)\n cls = self.update_means(cls, data)\n\n if self.epochs > 0 and self.epochs % 1 == 0:\n #epsln = epsln * .1\n epsln = self.epsilon(.1, .0001, self.epochs, 40)\n print('-----------------------------epsilon', epsln)\n if (change_cnt / self.size) < change_threshold:\n change = False\n print('the threshold was hit {}'.format(change_cnt / self.size))\n elif self.epochs % 50 == 0:\n print('{0} points changed or {1}%, {2}'.format(change_cnt, (change_cnt / self.size), epsln))\n # once we are done with this run adjust means\n self.epochs += 1\n print('Epoch {:d}, changed 
{:d}'.format(self.epochs, change_cnt))\n print('it took {} epochs'.format(self.epochs))\n self.time_taken = time.time() - tstart\n return self.rescale_ppm(data, cls)\n\n def phi(self, coord1, coordwinner, sig=1):\n if coord1 in [0,self.k-1] and coordwinner in [0, self.k-1] and coord1 != coordwinner:\n coord1, coordwinner = 1, 0\n return np.exp(-1*((((coord1 - coordwinner)**2)/(2*sig**2))))\n def kohonen_update_cls(self, cmeans, X, winner, verbose=False, epsln=.01, alpha=.0001):\n for i in range(len(cmeans)):\n cmeans[i][0] = cmeans[i][0] + epsln*self.phi(i, winner)*(X - cmeans[i][0])\n return cmeans\n def kohonen_init_run(self, data, change_threshold=.01):\n gaussrndm = get_truncated_normal(sd = self.std, mean=self.mu)\n rl = list()\n cls = dict()\n #change_threshold = self.del_thrsh\n epsln = .1\n tmp_dict = dict()\n vecs, dists = list(), list()\n # initialize the clusters randomly with a\n # gaussian distribution of random numbers\n for l in range(self.k):\n vecs.append(get_rounded_int_array(gaussrndm.rvs(3)))\n dists.append(np.linalg.norm(vecs[-1]))\n\n dists_sort = sorted(dists)\n vecs2 = list()\n for i in range(len(dists_sort)):\n idx = dists.index(dists_sort[i])\n dists[idx] = -99\n vecs2.append(vecs[idx])\n\n\n print(self.k)\n print(len(vecs2))\n #tmp_dict = sort_dict(tmp_dict, sort_by='keys')\n #print(tmp_dict)\n #mus = list(tmp_dict.values())\n #print(len(mus))\n #quit(-1104)\n\n # initialize the clusters randomly with a\n # gaussian distribution of random numbers\n for l in range(self.k):\n #print(l)\n cls[l] = []\n cls[l].append(vecs2[l])\n cls[l].append(list())\n\n #cls = self.update_means(cls, data)\n tot = 0\n #for c in cls:\n # print(cls[c])\n # print(c)\n # tot += len(cls[c][1])\n\n change = True\n tstart = time.time()\n # for each point calculate the distance and as you go keep track of the min\n # at end of loop add self to one with min distance\n # then adjust means and repeat until there or no more changes\n while change:\n change = False\n change_cnt = 0\n # for each sample pixel\n # find its nearest mean and put in its\n # cluster and adjust that cluster\n # mean toward the new point\n for sample in range(len(data)):\n dis = 999999\n best = None\n for i in cls:\n if self.dist_type == 'city_block':\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n else:\n if self.epochs < 5:\n print('')\n if len(cls[i][1]) <= 1:\n cdis = np.linalg.norm((cls[i][0]- data[sample]))\n elif np.linalg.cond(cls[i][1]) < 1 / sys.float_info.epsilon:\n cov = np.linalg.inv(cls[i][1])\n else:\n cov = pd.DataFrame(data[cls[i][1]]).std(axis=0).mean().values\n cov = cov**2\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov, is_std=True)\n #print(cov)\n if self.epochs < 5:\n print('cov')\n print(cov)\n print(cov.shape)\n cdis = mahalanobis_distance(data[sample], cls[i][0], cov)\n #cdis = mahalanobis_distance(data[sample], cls[i][0])\n\n if cdis < dis:\n dis = cdis\n best = i\n # if I am already in this cluster\n # keep going\n if sample in cls[best][1]:\n continue\n else:\n change = True\n change_cnt += 1\n # find where the sample was and remove it\n for n in cls:\n if sample in cls[n][1]:\n del cls[n][1][cls[n][1].index(sample)]\n break\n cls[best][1].append(sample)\n # now update the center\n cls = self.kohonen_update_cls(cls, data[sample], best, epsln=.001)\n # once we are done with this run adjust means\n # cls = self.update_means(cls, data)\n # at end of loop see what % of points changed\n # if less than threshold stop\n if self.epochs > 0 and self.epochs%10 == 0:\n #epsln = epsln 
*.1\n epsln = self.epsilon(.001, .0001, self.epochs, 40)\n print('-----------------------------epsilon', epsln)\n if (change_cnt/self.size) < change_threshold:\n change = False\n print('the threshold was hit {}'.format(change_cnt/self.size))\n elif self.epochs%50 == 0:\n print('{0} points changed or {1}%, {2}'.format(change_cnt, (change_cnt/self.size), epsln))\n self.epochs += 1\n print('Epoch {:d}, changed {:d}'.format(self.epochs, change_cnt))\n self.time_taken = time.time() - tstart\n return self.rescale_ppm(data, cls)\n\n def init_random_sample(self, ):\n print('rndm samp')\n print(self.df)\n cp = self.df.copy().values\n np.random.shuffle(cp)\n print(cp)\n new_clusters = cp[0:self.k]\n rl = list()\n # create the list of cluster objects\n for c in range(len(new_clusters)):\n rl.append(cluster(self.k, rc=c, value=new_clusters[c]))\n # calculate the cluster distances\n rl = self.calculate_cluster_diffs(rl)\n return rl\n def normal(self, ):\n pass\n def check_init(self, data):\n if self.init is 'random' and self.method == 'kmeans':\n # generate k random 1X3 list\n # that are from 0 -256\n print('kmeans')\n return self.kmean_init_run(data)\n elif self.init is 'random' and self.method == 'wta':\n print('wta')\n # generate k random 1X3 list\n # that are from 0 -256\n return self.wta_init_run(data)\n elif self.method == 'kohonen':\n print('kohonen')\n return self.kohonen_init_run(data)\n elif self.init is 'normal':\n pass\n\n def algo_init(self, data, verbose=True):\n \"\"\"Will initialize the clusters to just\n start as the different samples\n \"\"\"\n #rl = list([[0]*self.dimen]*self.size)\n rl = list()\n # create a cluster for every row\n # to start\n for row in range(len(data)):\n rl.append(cluster(k=self.k, rc=row, value=data[row]))\n rl[-1].inhabitants.append(data[row])\n return rl\n\n def adjust_pic(self, df, cls):\n for cl in cls:\n df[cl] = df[cl].mean(axis=0)\n return df\n\n def find_my_cluster(self, pt, cls):\n for cl in range(len(cls)):\n if pt in cls[cl]:\n return cl\n return None\n\n def algo_init2(self, data, verbose=True):\n rl = list()\n print('initializing for algorithmic cluster')\n for row1 in range(len(data)-1):\n #if row1 > 20:\n # break\n for row2 in range(row1+1, len(data)):\n if row1 != row2:\n rl.append([ np.linalg.norm(data[row1] - data[row2]), int(row1), int(row2)])\n\n #dummy = np.array(rl[0:10])\n #if verbose:\n # print('the dummy is ')\n # print(dummy)\n #dummy2 = col_sort(dummy, 0)\n #if verbose:\n # print('the dummy2 is ')\n # print(dummy2)\n\n rl = col_sort(np.array(rl))\n print('it')\n sound_alert_file(r'')\n print(rl[0:5])\n cls = list([[]]*self.size)\n\n for idx in range(self.size):\n cls[idx].append(idx)\n for edge in rl:\n cl1 = self.find_my_cluster(edge[1], cls)\n cl2 = self.find_my_cluster(edge[2], cls)\n if cl1 == cl2:\n #already in the same group\n continue\n else:\n cls.append(cls[cl1] + cls[cl2])\n del cls[cl1]\n del cls[cl1]\n if len(cls) == self.k:\n break\n\n return self.adjust_pic(data, cls)\n\n def check_method(self, data, verbose=False):\n if self.method is 'algo':\n if verbose:\n print('checked, algo')\n return self.algo_init2(data)\n else:\n return self.check_init(data)\n def rescale_ppm(self, df, cls):\n new_image = None\n # for all of my clusters\n # go through the pixels that belong to it\n # and change thier color values to the clusters\n # color values\n for c in cls:\n val = cls[c][0]\n to_fix = cls[c][1]\n df[to_fix] = val\n #for clstr in self.my_clusters:\n # for pix in clstr.inhabitants:\n # self.df[pix] = clstr.value\n return 
df\nclass cluster():\n \"\"\"Represents an individual cluster\"\"\"\n def __init__(self, k, rc, value):\n self.k = k # number of sibling cluster\n self.value = value # the current value of the mean I hold\n self.row=rc # the row in the toplogical grid I'm in.\n self.cluster_dist = dict() # distances to the other clusters\n self.inhabitants = list() # the row number of samples in this cluster\n def get_size(self):\n return len(self.inhabitants)\n def k_mean_calculate_mean(self, df):\n self.value = df[self.inhabitants, ].mean(axis=0)\n def wta_calculate_mean(self, df, eta):\n pass\n def kohonen_calculate_mean(self, df, eta, phi, pho_std):\n pass\n\nclass cluster_algos():\n \"\"\"My collection of clustering algorithms\"\"\"\n def __init__(self, df, method='algo', init='random', k=None, distance_type='city_block', distance_metric='dmean'):\n self.df = df # the data we will be working with\n self.my_clusters = clusters(k=k, df=self.df, method=method, init=init, distance_type=distance_type, distance_metric=distance_metric) # my collection of clusters\n self.k = k # desired number of clusters\n def algorithmic_cluster(self, ):\n \"\"\"Algorithmic clustering\"\"\"\n\n # while the number of clusters is < k\n cnt = 0\n # do my algorithmic thing yo !!!\n # i.e. run throu some number of epochs or until the desired\n # number of k's is reached\n while self.my_clusters.check_size() > self.k:\n if cnt%(1000) == 0: # shows every thousandth cluster\n print('There are {} clusters'.format(self.my_clusters.check_size()))\n cnt += 1\n # tell the clusters to perform and epoch\n # this will conjoin the closest groups\n # two at a time\n self.my_clusters.perform_epoch()\n # TODO: create a conversion method to\n # convert old image into rescaled one\n self.my_clusters.rescale_ppm(self.df)\n\n def finish_init(self):\n pass\n def fit(self, cmodel):\n pass\n def predict(self, X):\n pass\n def score(self, X, Y):\n pass\n\n # takes the known or Training set and the testing set\n def calculate_distances(self, cluster_means, samples, dist_dic):\n distances_dict = {}\n\n # iterate through samples of test set\n # calculating the distances between each\n # sample and all other samples in the training set\n for sample1 in range(len(samples)):\n # print(df.iloc[sample1, :])\n # print('==================================')\n # print('==================================')\n # print('==================================')\n\n # create a dictionary for this sample this will store the distances\n distances_dict[sample1] = {}\n for sample2 in range(len(cluster_means)):\n # calculate the distance and store it in the dictionary for this entry\n # print(df.iloc[sample2, :])\n # print(np.linalg.norm(df_te.iloc[sample1, :].values - df_tr.iloc[sample2, :].values))\n # print(self.euclidian_dist(df_tr.iloc[sample2, :].values, df_te.iloc[sample1, :].values))\n # distances_dict[sample1][sample2] = self.euclidian_dist(df_tr.iloc[sample2, :].values, df_te.iloc[sample1, :].values)\n distances_dict[sample1][sample2] = np.linalg.norm(\n samples.iloc[sample1, :].values - cluster_means.iloc[sample2, :].values)\n\n # distances_dict[sample1] = sorted(distances_dict[sample1].items(), key=lambda kv: kv[1])\n distances_dict[sample1] = dict(sorted(distances_dict[sample1].items(), key=operator.itemgetter(1)))\n # distances_dict[sample1] = sorted(distances_dict[sample1].items(), key=operator.itemgetter(1))\n # print(distances_dict[sample1])\n return distances_dict\n\nclass Gknn():\n def __init__(self, k=10, dist_metric='euclidean'):\n self.k=k\n self.X=None\n 
self.y=None\n self.cov=None\n self.inv_cov=None\n self.dist_metric=dist_metric\n def fit(self, X, y):\n self.y = y\n self.X = X\n self.cov = X.cov()\n self.inv_cov = np.linalg.inv(self.cov)\n\n def predict(self, X):\n dist_dic = self.calculate_distances(X)\n real = self.y.values.flatten().tolist()\n candidates = list(set(real))\n final_tallys = list()\n projected = list()\n yp = list()\n for zone in dist_dic:\n nn = list(dist_dic[zone].keys())\n votes = self.y.values[nn, :].flatten().tolist()\n ballot = {}\n for c in candidates:\n ballot[c] = votes.count(c)\n yp.append(sort_dict(ballot) )\n\n\n def calculate_distances(self, X):\n cov=None\n if type_check(X, against='dataframe'):\n cov = self.X.cov()\n else:\n cov = pd.DataFrame(X).cov()\n\n ret_dict = {}\n for i in range(len(X)):\n cdl = {}\n for j in range(len(self.X)):\n #cdl[j] = mahalanobis_distance(X[i], self.X.values[j], )\n #cdl[j] = np.linalg.norm(X[i]-self.X.values[j])\n cdl[j] = self.get_distance(X[i], j)\n cdl = sort_dict(cdl)\n cnt = 0\n ndl = {}\n for ky in cdl:\n ndl[ky] = cdl[ky]\n cnt += 0\n if cnt == self.k:\n break\n ret_dict[i] = ndl\n return ret_dict\n\n def get_distance(self, x, j):\n if self.dist_metric == 'city_block':\n return np.linalg.norm(x - self.X.values[j])\n elif self.dist_metric == 'mahalanobis':\n return mahalanobis_distance(x, self.X.values[j], self.inv_cov)\n else:\n return euclidean_distance(x, self.X.values[j], np.mean(self.X.values.std(axis=0)))\n\n\n\n\n\n\nclass G_NN():\n def __init(self):\n self.data=None\n self.X = None\n self.y = None\n self.Trainset= None\n self.Testset=None\n\n\nclass naive_bayes_classifier():\n def __init__(self, classifiers=(), verbose=True):\n self.cms = None\n self.LUT = None\n self.clsf = [clf for clf in classifiers]\n self.accuracies = list()\n # will fit all classifiers with given training\n # set and generate confusion matrices for each\n def fit(self, X, y, verbose=False):\n for clf in range(len(self.clsf)):\n self.clsf[clf].fit(X,y)\n yp = self.clsf[clf].predict(X)\n self.accuracies.append(accuracy_score(y, yp))\n self.cms = self.generate_cms(X, y)\n self.LUT = self.naive_bayes_cm_fnc(self.cms)\n\n def generate_cms(self, X, y, verbose=False):\n \"\"\"\n will generate the confusion matrices for the baysian\n classifier\n :param X: predictor\n :param y: target\n :param verbose:\n :return: list of confusion matricies, entry i is classifier i's\n \"\"\"\n return [confusion_matrix(y, clsf.predict(X)) for clsf in self.clsf]\n\n def set_cms(self, cms):\n for clf in range(len(self.clsf)):\n self.clsf[clf].cm = cms[clf]\n\n def predict(self, x):\n \"\"\" Will go through classifiers, getting prediction lists\n :param x:\n :return:\n \"\"\"\n # will contain a list of lists where list i is for\n # observation i, and the contents of list i are\n # the predictions for observation i from the classifiers\n predictions = list()\n\n if type(x) == type(pd.DataFrame([])):\n print('fix the object')\n x = x.values\n\n for obs in range(len(x)):\n # create list i\n predictions.append(list())\n # move through classifiers generating predictions for\n # observation i\n for clsf in self.clsf:\n predictions[obs].append(clsf.predict([x[obs]]))\n #print(predictions[obs])\n tl = []\n # now go through the predictions for each observation using the\n # look up table to make the final prediction\n yp = list()\n for obs in range(len(predictions)):\n #print(self.LUT[predictions[obs][0], predictions[obs][1]].tolist())\n yp.append( self.LUT[predictions[obs][0], 
predictions[obs][1]].tolist()[0].index(max(self.LUT[predictions[obs][0], predictions[obs][1]].tolist()[0])))\n\n #print(yp)\n return yp\n\n def naive_bayes_cm_fnc(self, conmats, verbose=True):\n # print(cm1.transpose().reshape((9,)))\n # print(cm1[0:, 0])\n # print(cm2[0:, 0])\n # print(cm1[0:, 0]*cm2[0:,0])\n\n nbcml = [] # naive bayes confusion matrix list\n for cms in conmats:\n nbcml.append(self.nb_cm(cms))\n nb1 = self.nb_cm(conmats[0]) # confusion matrix object 1\n nb2 = self.nb_cm(conmats[1]) # confusion matrix object 2\n\n if False:\n print('cm1')\n print(nb1.cm)\n print('cm2')\n print(nb2.cm)\n print('prob table 1')\n print(nb1.prob_table)\n print('prob table 2')\n print(nb2.prob_table)\n print('cm1:')\n print(cm1)\n print('cm2:')\n print(cm2)\n print('product')\n print(cm1 * cm2)\n\n# shp = cm1.shape[0]\n# shp = nbcml[0].cm.shape[1]\n shp = conmats[0].shape[0]\n # create empty look up table\n tupparam = tuple([shp for i in range(shp+1)])\n look_up = np.empty(tupparam)\n print(look_up)\n\n # print(nb_mat)\n # nb_mat[0,0] = cm1[:,0]*cm2[:,0]\n # print(nb_mat[0,0,:])\n # print(nb_mat[0,1,:])\n\n for col1 in range(shp):\n for colb in range(shp):\n look_up[col1, colb] = nb1.prob_table[:, col1] * nb2.prob_table[:, colb]\n return look_up\n\n class nb_cm():\n \"\"\"represents a confusion matrix from a classifier\"\"\"\n def __init__(self, cm):\n self.cm = cm # the confusion matrix stored\n self.row_sums = self.calc_row_sums() # the row sums (class counts) for each class\n self.prob_table = self.create_prob_table() # probability table used to make look up table\n # counts the number of each class in the confusion matrix\n def calc_row_sums(self):\n return [sum(r) for r in self.cm]\n # probability table created from confusion matrix\n def create_prob_table(self):\n return self.cm / self.row_sums\n\nclass GLIN_Regressor(Learner):\n def __init__(self, X, Y, Xts, Yts, w=None, c = 0, intercept=True, eta=.0005, etamin=.000001,\n etamax=.0001, kmax=200, eta_dec=True, epochs=90000, cost_func='mse', wgt='zero', tol=1e-3, lm=4):\n super().__init__()\n self.data = None\n self.intercept = intercept\n self.w = w\n self.wgt = wgt\n self.c = c\n self.X = X\n self.Y = Y\n self.Xts = Xts\n self.Yts = Yts\n self.tol = tol\n self.Ymean = Y.mean(axis=0)\n self.test_epochs = None\n self.test_mae = None\n self.test_cod = None\n self.test_rmse = None\n #print(self.Ymean)\n self.Ymeants = Yts.mean(axis=0)\n #print(self.Ymeants)\n self.best_MAE = 999999\n self.N = len(X)\n print('N:', self.N)\n self.d = X.shape[1]\n self.eta = eta\n self.epochs = epochs\n self.etamax=etamax\n self.etamin=etamin\n self.kmax=kmax\n self.eta_dec=eta_dec\n self.cost_fnc = cost_func\n self.p_scores = None\n self.Vif_scores = None\n self.Rsqr = None\n self.wald_chi = None\n self.best_w = None\n self.best_b = None\n self.best_MSE = 200000000000\n self.best_RMSE = 200000000000\n self.best_COD = 200000000000\n self.best_MAE = 200000000000\n self.best_Rsqr = 20000000000\n self.epoch_stop = -99\n self.lm = lm\n self.finish_init()\n\n # performs multiple linear regression on the x and y data\n # and returns the generated parameter vector W\n def multi_linear_regressor(self, x_data, y_data):\n x = np.array(x_data, dtype=np.float)\n y = np.array(y_data, dtype=np.float)\n x_transpose = np.transpose(x)\n xtx = np.dot(x_transpose, x)\n xtx_inv = np.linalg.inv(xtx)\n xtx_inv_xt = np.dot(xtx_inv, x_transpose)\n w = np.dot(xtx_inv_xt, y)\n return w\n\n def wgt_predict(self, wgt, X):\n return np.dot(X.transpose(), wgt)\n\n def sigmoid(self, X):\n 
return 1/(1-np.e**(-X))\n\n def finish_init(self):\n \"\"\"\n Sets up the weights and b\n :return:\n \"\"\"\n if self.w is None:\n if self.wgt == 'random':\n self.w = get_truncated_normal(mean=0, sd=1, low=-1, upp=1).rvs(self.d)\n elif self.wgt== 'zero':\n self.w = np.array([0] * self.d)\n elif self.wgt == 'ols':\n self.w = np.dot(np.linalg.inv(np.dot(self.X.transpose(), self.X)), self.X.transpose())\n self.w = np.dot(self.w, self.Y)\n \"\"\"\n if self.intercept:\n if self.wgt == 'random':\n # get a normally distributed randomized weight vector\n self.w = get_truncated_normal(mean=0, sd=1, low=-1, upp=1).rvs(self.d)\n self.w = np.array(self.w + [1])\n else:\n #b = list([[1]]*(self.N)) # add intercept\n #self.X = np.hstack((self.X, b))\n #self.d = self.d + 1\n #b = list([[1]]*(len(self.Xts))) # add intercept\n #self.Xts = np.hstack((self.Xts, b))\n else:\n self.w = get_truncated_normal(mean=0, sd=1, low=-1, upp=1).rvs(self.d)\n self.w = np.array(self.w)\n self.wd = np.dot(np.linalg.inv(np.dot(self.X.transpose(), self.X)), self.X.transpose())\n self.wd = np.dot(self.wd, self.Y)\n print('wd shape', self.wd.shape)\n print(self.wd)\n \"\"\"\n\n def predicted_weights(self, yp):\n wd_a = np.dot(np.linalg.inv(np.dot(self.X.transpose(), self.X)), self.X.transpose())\n return np.dot(wd_a, yp)\n\n def predict_score(self,X, ytr, ypr, verbose=False):\n pass\n\n def report_scores(self, ytr, ypr, verbose=False, Ymean=None):\n if Ymean is None:\n Ymean = self.Ymean\n n = len(ytr)\n mmse = self.MSE(ytr, ypr, n)\n print('n',n)\n print('ypr', len(ypr))\n print(' ----------------RMSE:',np.sqrt(mmse))\n print(' -----------------MSE: ', mmse)\n print(' ---------sklearn MSE:', metrics.mean_squared_error(ytr, ypr))\n print(' -------var explained:', metrics.explained_variance_score(ytr, ypr))\n print(' -----------------MAE:', self.MAE(ytr, ypr, n))\n print(' -----------------CD:', self.R2(ytr, ypr, Ymean))\n print(' ---------sklearn r2:', metrics.r2_score(ytr, ypr))\n print(' -----------------R^2', self.Rvar(ytr, ypr, Ymean))\n print('-------------------------------------------------------------')\n print('-------------------------------------------------------------\\n')\n\n def cost_derivative(self, X, ytruth, ypred, cost_fnc='mae'):\n if cost_fnc == 'mae':\n maePrime_w = -1/len(ytruth) * np.dot((ytruth-ypred)/(abs(ytruth - ypred)),X)\n maePrime_b = -1 / len(ytruth) * sum([(yt-yp)/abs(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n return [maePrime_w, maePrime_b]\n if cost_fnc == 'mse':\n print('mse')\n msePrime_w = -2/len(ytruth) * np.dot((ytruth-ypred), X)\n msePrime_b = -2 / len(ytruth) * sum([(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n return [msePrime_w, msePrime_b]\n\n\n def fit(self,solver='mae'):\n #est = .0001\n #est = .00992\n est = self.eta\n etamax = self.eta\n etamin = self.etamin\n epochs = self.epochs\n kmax = self.kmax\n self.test_cod, self.test_epochs, self.test_mae, self.test_rmse = list(), list(), list(), list()\n #self.w = np.array([0]*self.d)\n #self.w = self.wd\n #print(self.w)\n n = self.N\n print('-----------------------N', self.N)\n print(self.cost_fnc)\n d = self.d\n mmse_old = 0\n mmae_old = 0\n old_thresh = list()\n for i in range(epochs):\n #np.random.shuffle(self.X.values)\n # get a prediction\n #print('y means', self.Ymean)\n #print('ytest means', self.Ymeants)\n #print('w\\n', self.w)\n yp = np.dot(self.X, self.w) + self.c\n #print('yp\\n',yp)\n #print('gf\\n', self.Y)\n mmse =self.MSE(self.Y, yp, n)\n mmae =self.MAE(self.Y, yp, n)\n rmse = self.RMSE(self.Y, yp)\n 
self.test_epochs.append(i)\n self.test_rmse.append(rmse)\n self.test_cod.append(self.R2(self.Y, yp))\n self.test_mae.append(mmae)\n if self.cost_fnc == 'mse':\n old_thresh.append(mmse)\n elif self.cost_fnc == 'mae':\n old_thresh.append(mmae)\n if self.cost_fnc == 'mse' and mmse < self.best_MSE:\n print('-------------------------------------------------------------- New Best MSE:', mmse)\n self.best_MSE = mmse\n self.best_MAE = mmae\n self.best_RMSE = rmse\n self.best_COD = self.R2(self.Y, ypred=yp, ymean=self.Ymean)\n self.best_b = self.c\n self.best_w = self.w\n self.best_score = mmse\n self.best_epoch = i\n if self.cost_fnc == 'mae' and mmae < self.best_MAE:\n print('-------------------------------------------------------------- New Best MAE:', mmae)\n self.best_MAE = mmae\n self.best_MSE = mmse\n self.best_RMSE = rmse\n self.best_COD = self.R2(self.Y, ypred=yp, ymean=self.Ymean)\n self.best_b = self.c\n self.best_w = self.w\n self.best_score = mmae\n self.best_epoch = i\n\n\n #yp = self.X*self.w + self.c\n\n #print('pred',yp)\n #D_m = (-2/n) * sum(np.dot(self.X.transpose(), (self.Y - yp)))\n #D_m = (-2/n) * sum(self.X.values.reshape(self.N, 1) * (self.Y - yp))\n #D_m = (-2/n) * sum((self.Y - yp).values.reshape(self.N, 1)*self.X)\n #D_m = (-2/n) * sum((self.X.reshape(1, self.N))*(self.Y - yp))\n #D_m = (-2/n) * sum((self.X.reshape(1, self.N))*(self.Y - yp))\n #D_m = (-2/n) * sum((self.X.transpose())*(self.Y - yp))\n #print('-----------------------------------------')\n #print(self.X.shape)\n #print(self.Y-yp)\n\n w_b = self.cost_derivative(self.X, self.Y, yp, self.cost_fnc)\n\n D_m = w_b[0]\n D_c = w_b[1]\n #D_m = (-1/n) * (np.dot(1/abs(self.Y - yp), self.X))\n #D_c = (-1/n)* (1/sum(abs(self.Y-yp)))\n self.w = self.w - self.eta*D_m\n self.c = self.c - self.eta*D_c\n print('eta: {} -----------------RMSE:'.format(self.eta), np.sqrt(mmse))\n print('Epoch: {} -----------------MSE:'.format(i+1), mmse)\n print(' ---------sklearn MSE:'.format(i+1), metrics.mean_squared_error(self.Y, yp))\n print(' ---------var explained:'.format(i+1), metrics.explained_variance_score(self.Y, yp))\n print(' -----------------MAE', self.MAE(self.Y, yp, n))\n print(' -----------------CD:', self.R2(self.Y, yp, self.Ymean))\n print(' ---------sklearn r2:'.format(i+1), metrics.r2_score(self.Y, yp))\n print(' -----------------R^2', self.Rvar(self.Y, yp, self.Ymean))\n print('-------------------------------------------------------------')\n print('-------------------------------------------------------------\\n')\n #if abs(mmse - mmse_old) < .00000000001:\n lm = self.lm\n if len(old_thresh) >= lm and abs(sum(old_thresh[-lm:])/lm - old_thresh[-1]) < self.tol:\n print('-- -- -- -- -- -- -- ****** thresh met {} ******'.format(abs(sum(old_thresh[-lm:])/lm - old_thresh[-1])))\n break\n if self.cost_fnc == 'mae' and abs(mmae - mmae_old) < self.tol *.00001:\n #print('thresh met {}'.format(abs(mmse - mmse_old)))\n print(' ****** thresh met {} ******'.format(abs(mmae - mmae_old)))\n break\n if self.cost_fnc == 'mse' and abs(mmse - mmse_old) < self.tol * .00001:\n print(' ****** thresh met {} ******'.format(abs(mmse - mmse_old)))\n break\n mmse_old = mmse\n mmae_old = mmae\n self.eta = epsilon(emax=etamax, emin=etamin, k=i, kmax=kmax)\n\n def fit2(self, cmodel):\n cnt = 0\n est = 1/100000\n self.eta = est\n etamax = est\n etamin = est*.01\n kmax = 10000000\n threshold = .1\n dif = 1000000\n #ymean = self.Y.values.mean(axis=0)\n ymean = self.Ymean\n self.w = self.wd\n w = self.w\n while .0001 < dif:\n pred = []\n \"\"\"\n # go 
through making predictions correcting the error as you go\n #for x, y, w in zip(self.X, self.Y, self.w.transpose()):\n for x, y in zip(self.X, self.Y):\n # make prediction\n #print('shape of x')\n #print(self.wd.transpose().shape[0])\n #print('wd')\n #print(self.w)\n #print(self.wd.shape)\n g = np.dot(x, self.w.transpose())\n pred.append(g)\n # get the error of the derirvative\n\n #wd = np.dot(np.linalg.inv(np.dot(x.transpose(), x)),x.transpose())\n #print('w')\n #print(self.w)\n #cw = np.dot(wd, self.Y)\n #div = -2*np.linalg.norm(self.w - self.wd)\n #print('g')\n #print(g)\n #print(' y')\n #print(y)\n #print('error', div)\n #self.w = self.w - eta*div\n #self.wd = self.wd - eta*div\n #print(self.w)\n # once predict done calculate error and if\n if cnt > 10:\n k = 0\n \"\"\"\n\n \"\"\"\n #score\n sum = 0\n rss = 0\n tss = 0\n for g, y in zip(pred, self.Y):\n sum += (g - y)**2\n rss += (g - ymean)**2\n tss += (y-ymean)**2\n print('sum', sum)\n scr = (sum/self.N)\n rsqu = (rss/tss)\n print('MSE')\n print(scr)\n print('rsqur')\n print(1- rsqu)\n \"\"\"\n yp = self.predict(self.Xts)\n #print(yp[0:5])\n #print(self.Y[0:5])\n mae, mse, rsqu, rvar, mse_prime = self.score(ypred=yp)\n #mae, mse, rsqu, rvar, mse_prime = self.score(ypred=pred)\n print('Epoch: {} eta:{}, mae: {}, mse: {}, R2: {}, Rvar: {}, dif {}'.format(cnt, np.around(self.eta,3), mae, mse, rsqu, rvar, dif))\n old = self.wd\n #print('old')\n #print(old)\n #print('w')\n #print(self.w)\n print('prime')\n print(mse_prime[0][0:self.d])\n print('prime')\n print(mse_prime[1])\n self.wd[0:self.d] = self.wd[0:self.d] - self.eta * mse_prime[0]\n self.wd[self.d] = self.wd[self.d] - self.eta * mse_prime[1]\n\n #print('old')\n #print(old)\n #print('w')\n #print(self.w)\n dif = abs(np.dot(self.w, old))\n\n print('')\n self.eta = epsilon(emax=etamax, emin=etamin, k=cnt, kmax=kmax)\n\n cnt += 1\n\n def g_OLS(self, x, y):\n pass\n\n def predict(self, X):\n return np.dot(X, self.best_w) + self.best_b\n\n def SSE(self, ytrue, ypred):\n return sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n\n def MSE(self, yt, yp, n=None):\n if n is None:\n n = len(yt)\n return self.SSE(yt, yp)/n\n def RMSE(self, yt, yp, n=None):\n if n is None:\n n = len(yt)\n return sqrt(self.SSE(yt, yp)/n)\n def MAE(self, ytrue, ypred, n=None):\n if n is None:\n n = len(ytrue)\n return sum([abs(yt - yp) for yp, yt in zip(ytrue, ypred)]) / n\n\n def SSREG(self, ypred, ymean):\n return sum([(yp - ymean) ** 2 for yp in ypred])\n\n def SSRES(self, ytrue, ypred):\n return sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n\n def R2(self, ytrue, ypred, ymean=None):\n if ymean is None:\n ymean = self.Ymean\n return 1 - (self.SSRES(ytrue, ypred)/self.SSTOT(ytrue, ymean))\n\n def Rvar(self, ytrue, ypred, ymean):\n ssreg = self.SSREG(ytrue, ymean=ymean)\n ssres = self.SSRES(ytrue=ytrue, ypred=ypred)\n return (self.SSREG(ypred, ymean)/self.N)/(self.SSTOT(ytrue, ymean)/self.N)\n # return self.SSREG(ypred, ymean)/ (ssres + ssreg)\n\n def SSTOT(self, ytrue, ymean):\n return sum([(yt - ymean) ** 2 for yt in ytrue]) # scatter total (sum of squares)\n\n\n def score(self, ypred, ytrue=None, ymean=None, verbose=False):\n \"\"\"returns a number of scoring metrics for the predictions from a linear regression\n :param ypred: predicted values from a learner\n :param ytrue: the ground truth values\n :param ymean: the average value for the target variable\n :param verbose: how talkative you want the scoring to be\n :return: mae (mean absolute error), mse(mean square error), R2 (coefficient of 
determination), R2var (proportion of variance explained)\n \"\"\"\n if ytrue is None:\n ytrue = self.Yts\n if ymean is None:\n ymean = self.Ymeants\n\n ssres = sum([(yt-yp)**2 for yp, yt in zip(ytrue, ypred)]) # residual sum of squares (error)\n mae = sum([abs(yt-yp) for yp, yt in zip(ytrue, ypred)])/len(self.Xts) # mean absolute error\n sstot = sum([(yt-ymean)**2 for yt in ytrue]) # scatter total (sum of squares)\n ssreg = sum([(yp-ymean)**2 for yp in ypred]) # sum of sqaures(variance from mean of predictions)\n mse_prime = []\n mse = ssres/len(self.Xts)\n mse_prime.append(-2*sum([np.dot(x,(yt-yp)) for yp, yt, x in zip(ytrue, ypred, self.Xts)])/len(self.Xts))\n mse_prime.append(-2*sum([(yt-yp) for yt, yp in zip(ytrue, ypred)])/len(self.Xts))\n R2 = 1 - (ssres)\n R2var = ssreg/max(.01, (ssres + ssreg))\n\n return mae, mse, R2, R2var, mse_prime\n\ndef random_forest_tester(X_tr, y_tr, X_ts, y_ts, verbose=False, param_grid=None, s=0, cv=5, save_feats=False):\n if param_grid is None:\n param_grid = {\n # 'n_estimators': [1500, 1800, 2000], # how many trees in forest\n 'n_estimators': [1000, 2200], # how many trees in forest\n # 'max_features': [None, 'sqrt', 'log2'], # maximum number of features to test for split\n 'max_features': [None], # maximum number of features to test for split\n # 'max_features': ['sqrt'],\n # 'criterion': ['gini'],\n 'criterion': ['entropy'], # how best split is decided\n # 'max_depth': [None, 10, 100, 1000, 10000], #\n # 'max_depth': [None, 10, 100], # how large trees can grow\n # 'max_depth': [None, 10, 20], # how large trees can grow\n 'max_depth': [50, None], # how large trees can grow\n 'oob_score': [True], #\n # 'min_samples_leaf': [1, 3, 5], # The minimum number of samples required to be at a leaf node\n 'min_samples_leaf': [1], # The minimum number of samples required to be at a leaf node\n # 'max_leaf_nodes': [None, 2, 10],\n 'max_leaf_nodes': [None],\n 'min_weight_fraction_leaf': [0], #\n # 'min_samples_split': [2, .75],\n 'min_samples_split': [2],\n 'min_impurity_decrease': [0, .01],\n 'random_state': [None],\n # 'class_weight': [None,]\n 'class_weight': ['balanced_subsample', 'balanced', None, {0: .4, 1: .6}]\n }\n\n RF_clf0 = RandomForestClassifier()\n scorers0 = {\n 'recall_score': make_scorer(recall_score),\n 'accuracy_score': make_scorer(accuracy_score), # (TP + TN) / (TP+FP+TN+FN), overall accuracy of model\n 'precision_score': make_scorer(precision_score),\n 'confusion_matrix': make_scorer(confusion_matrix)\n }\n scorersSens = {\n 'recall_score': make_scorer(recall_score)\n }\n scorersAcc = {\n 'accuracy_score': make_scorer(accuracy_score) # (TP + TN) / (TP+FP+TN+FN), overall accuracy of model\n }\n scorersPrec = {\n 'precision_score': make_scorer(precision_score),\n # TP/(TP+FP), a metric of models ability to not miss label a positive\n }\n scorers = [scorersAcc, scorersSens, scorersPrec]\n scr = ['accuracy_score', 'recall_score', 'precision_score']\n GSCV_clf0 = GridSearchCV(estimator=RF_clf0, param_grid=param_grid, cv=cv, scoring=scorers[s], refit=scr[s])\n GSCV_clf0.fit(X_tr, y_tr)\n print('Scoring for {:s}'.format(scr[s]))\n print('ZBest Params:')\n print(GSCV_clf0.best_params_)\n print('best score: ',GSCV_clf0.best_score_)\n RF_clfstd = GSCV_clf0.best_estimator_\n feature_impz = RF_clfstd.feature_importances_\n ypz = RF_clfstd.predict(X_ts)\n feates = viz.display_significance(feature_impz, X_tr.columns.values.tolist(), verbose=True)\n if save_feats:\n pd.DataFrame({'variables': list(feates.keys()), 'Sig': list(feates.values())}).to_excel(\n 
'RandomForest_Feature_significance_{}_.xlsx'.format(today_is()))\n accuracy, scores, posneg, = bi_score(ypz, y_ts, vals=[0, 1], classes='')\n print('Sensitivity:', posneg['sen'])\n viz.show_performance(scores=scores, verbose=True)\n print('=================================================================================================')\n print('=================================================================================================')\n\ndef logistic_tester(X_tr, y_tr, X_ts, y_ts, verbose=False, param_grid=None, pg=1, s=0, cv=5, save_feats=False):\n if param_grid is None:\n # set up parameter grid for grid search testing\n param_gridB = {'penalty': ['elasticnet'],\n 'dual': [False],\n 'tol': [1e-4, 1e-6],\n 'Cs': [10],\n 'fit_intercept': [True],\n 'class_weight': ['balanced', {0: .6, 1: .4}, {0: .4, 1: .6}],\n 'solver': ['saga'],\n 'max_iter': [5000, 100000],\n }\n param_gridA = {'penalty': ['l2'],\n 'dual': [False],\n 'tol': [1e-1, 1e-3],\n 'Cs': [10, 1, 5],\n 'cv': [3, 5],\n 'fit_intercept': [True],\n 'class_weight': ['balanced', {0: .5, 1: .5}, {0: .55, 1: .45}],\n 'solver': ['newton-cg', 'lbfgs', 'sag'],\n 'max_iter': [1000, 5000, 10000],\n }\n param_gridl = {'penalty': ['l1'],\n 'dual': [False],\n 'tol': [1e-2, 1e-3],\n 'Cs': [10],\n 'cv': [3, 5],\n 'fit_intercept': [True],\n # 'class_weight': [{0: .5, 1: .5}, {0: .6, 1: .4}],\n 'class_weight': ['balanced', {0: .5, 1: .5}, {0: .6, 1: .4}],\n 'solver': ['liblinear', 'saga'],\n # 'max_iter': [1000, 2000, 5000],\n 'max_iter': [900, 2000, 5000],\n }\n param_grid = [param_gridB,param_gridA, param_gridl]\n param_grid = param_grid[pg]\n\n # create the classifier\n log_clf0 = LogisticRegressionCV()\n RF_clf0 = log_clf0\n scorers0 = {\n 'recall_score': make_scorer(recall_score),\n 'accuracy_score': make_scorer(accuracy_score), # (TP + TN) / (TP+FP+TN+FN), overall accuracy of model\n 'precision_score': make_scorer(precision_score),\n 'confusion_matrix': make_scorer(confusion_matrix)\n }\n scorersSens = {\n 'recall_score': make_scorer(recall_score)\n }\n scorersAcc = {\n 'accuracy_score': make_scorer(accuracy_score) # (TP + TN) / (TP+FP+TN+FN), overall accuracy of model\n }\n scorersPrec = {\n 'precision_score': make_scorer(precision_score),\n # TP/(TP+FP), a metric of models ability to not miss label a positive\n }\n scorers = [scorersAcc, scorersSens, scorersPrec]\n scr = ['accuracy_score', 'recall_score', 'precision_score']\n s = 0\n cv = 5\n # perform the grid search cross validation\n GSCV_clf0 = GridSearchCV(estimator=RF_clf0, param_grid=param_grid, cv=cv, scoring=scorers[s], refit=scr[s])\n GSCV_clf0.fit(X_tr, y_tr)\n print('Scoring for {:s}'.format(scr[s]))\n print('ZBest Params for set 1:')\n print(GSCV_clf0.best_params_)\n print('best score: ', GSCV_clf0.best_score_)\n\n RF_clfstd = GSCV_clf0.best_estimator_\n ypz = RF_clfstd.predict(X_ts)\n # fit the\n # RF_clfstd.fit(X_trz, y_train0)\n\n feature_impz = RF_clfstd.coef_[0]\n # from D_Space import get_current_date\n feates = viz.display_significance(feature_impz, X_tr.columns.values.tolist(), verbose=True)\n pd.DataFrame({'variables': list(feates.keys()), 'Sig': list(feates.values())}).to_excel(\n 'Logistic_correlations.xlsx')\n # generate_excel(dic=feates, name='NREL_FEAT_{}_.xlsx'.format(get_current_date()))\n accuracy, scores, posneg, = bi_score(ypz, y_ts, vals=[0,1], classes='')\n print('Sensitivity:', posneg['sen'])\n viz.show_performance(scores=scores, verbose=True)\n# =========================================================================\n# 
=========================================================================\n# numpy tools\n# =========================================================================\n# =========================================================================\ndef get_int_mean(na):\n return np.array((np.around(na.mean(axis=0), 0)), dtype=int)\n\ndef col_sort(df, col=0):\n return df[df[:, col].argsort()]\n\ndef get_select_max_idx(na_row, selection):\n \"\"\" find the maximum distance in the given row\n based on the columns (other points) in the selection list\n the idea is that it will find the minimum distance in the\n sample rows row in the distance look up table, to points\n in some other cluster\n\n :param na_row: a samples row in the distance look up table, the columns\n represent the other points in the sample population\n :param selection: the points you want to look at the distances too,\n represent points in some other cluster\n :return: returns the maximum distance and the point that relates to it\n \"\"\"\n maxi = na_row[selection].max()\n ret = np.where(na_row == maxi)\n ret = ret[0][0]\n return ret, maxi\n\ndef get_select_min_idx(na_row, selection):\n \"\"\" find the minimum distance in the given row\n based on the columns (other points) in the selection list\n the idea is that it will find the minimum distance in the\n sample rows row in the distance look up table, to points\n in some other cluster\n\n :param na_row: a samples row in the distance look up table, the columns\n represent the other points in the sample population\n :param selection: the points you want to look at the distances too,\n represent points in some other cluster\n :return: returns the minimum distance and the point that relates to it\n \"\"\"\n mini = na_row[selection].min()\n ret = np.where(na_row == mini)\n ret = ret[0][0]\n return ret, mini\n# =========================================================================\n# =========================================================================\n# Usefull math tools\n# =========================================================================\n# =========================================================================\ndef mahalanobis_distance(X, mu, cov, is_std=False):\n #print('covariance')\n #print(cov\n xminmu = X - mu\n #print('x - mu')\n #print(xminmu)\n if is_std:\n return np.sqrt((np.dot(xminmu.transpose(), xminmu))/cov)\n return np.sqrt(np.dot(np.dot(xminmu.transpose(), np.linalg.inv(cov)), xminmu))\n\n\ndef euclidean_distance(X, mu, std, is_std=False):\n #print('covariance')\n #print(cov\n xminmu = X - mu\n #print('x - mu')\n #print(xminmu)\n return np.sqrt((np.dot(xminmu.transpose(), xminmu))/std)\n\n\ndef ppm_MSE(ppm1, ppm2):\n # get their covariance matricies\n #print('originals')\n #print(ppm1)\n #print(ppm2)\n cov_1 = pd.DataFrame(ppm1).cov()\n cov_2 = pd.DataFrame(ppm2).cov()\n #print('covariances')\n #print(cov_1.head())\n #print(cov_2.head())\n cov_dif = cov_1 - cov_2\n sum_e = 0\n #for row1, row2 in zip()\n #print('covariance and its square')\n #print(cov_dif.head())\n cov_dif = (cov_dif ** 2)\n #print(cov_dif.head())\n sum = 0\n for row in cov_dif.values:\n sum += row.sum()\n return sum/len(ppm1)\n\n\ndef ppm_AAE(ppm1, ppm2):\n # get their covariance matricies\n #print('originals')\n #print(ppm1)\n #print(ppm2)\n cov_1 = pd.DataFrame(ppm1).cov()\n cov_2 = pd.DataFrame(ppm2).cov()\n #print('covariances')\n #print(cov_1.head())\n #print(cov_2.head())\n cov_dif = cov_1 - cov_2\n sum_e = 0\n #for row1, row2 in zip()\n\n #print(cov_dif.head())\n cov_dif = 
np.abs(cov_dif)\n #print(cov_dif.head())\n sum = 0\n for row in cov_dif.values:\n sum += row.sum()\n return sum/len(ppm1)\n\n\n# =========================================================================\n# =========================================================================\n# Scoring tools\n# =========================================================================\n# =========================================================================\ndef bi_score(g, y, vals, classes='', method='accuracy', verbose=False, train=False, retpre=False):\n scores = {'tp':0,\n 'fp':0,\n 'fn':0,\n 'tn':0,}\n posneg = {'bestn':0,\n 'bestp':0,\n 'predictions':list(g)}\n\n # go through the guesses and the actual y values scoring\n # * true positives: tp\n # * false positives: fp\n # * true negatives: tn\n # * false negatives: fn\n for gs, ay in zip(g,y.values):\n # check for negative\n if int(gs) == int(vals[0]):\n if int(ay) == int(gs):\n scores['tn'] += 1\n else:\n scores['fn'] += 1\n elif int(gs) == int(vals[1]):\n if int(ay) == int(gs):\n scores['tp'] += 1\n else:\n scores['fp'] += 1\n else:\n print('Uh Oh!!!!!: {0}'.format(gs))\n print('line number 2461')\n quit(-463)\n\n posneg['bestn'] = scores['tn']/(scores['fp']+scores['tn'])\n posneg['bestp'] = scores['tp']/(scores['tp']+scores['fn'])\n\n # calculate and return the overall accuracy\n if method == 'accuracy':\n if retpre:\n accuracy, sum, sensitivity, specificity, precision = viz.show_performance(scores=scores,\n verbose=verbose, retpre=retpre)\n posneg['Sensitivity'] = sensitivity\n posneg['Specificity'] = specificity\n posneg['Precision'] = precision\n else:\n accuracy, sum, sensitivity, specificity = viz.show_performance(scores=scores, verbose=verbose,)\n posneg['sen'] = sensitivity\n posneg['spe'] = specificity\n if train:\n return accuracy, scores, posneg\n return accuracy, scores, posneg\n\n\n# =========================================================================\n# =========================================================================\n# Result Recording analysis and documentation\n# =========================================================================\n# =========================================================================\n# *** *** *** *** *** *** can be used to keep records of tests\nclass ResultsLog:\n \"\"\"\n This class can store different results of ML testing\n \"\"\"\n def __init__(self, result_dict, df_old_log=None, infile_name_old_log=None, sheet_name=None, sort_bys=None,\n outfile_name_updated_log=None, usecols=None, verbose=False):\n self.result_dict = result_dict\n self.record_name = list(result_dict.keys()) # the names of the attributes\n self.records = list(result_dict.values()) # the values to be added to the log\n self.df_old_log = df_old_log # the data frame that contains the logged data if needed, can be left None and will be loaded based on the old log file\n self.infile_name_old_log = infile_name_old_log # the name of the file to be added to TODO: need to add checker method for file/diretory existence and remove this\n if outfile_name_updated_log is not None:\n self.outfile_name_updated_log=outfile_name_updated_log # TODO: currently neccessary soon to be optional name of new log file if desired\n else:\n self.outfile_name_updated_log=infile_name_old_log # TODO: currently neccessary soon to be optional name of new log file if desired\n self.sheet_name=sheet_name # TODO: modify the saving portion to use an excel writer so I can get at specific sheets w/o overwriting the old file\n self.sort_bys=sort_bys 
# optional if you want the log file sorted in a specific way\n self.usecols=usecols # optional: can select specific columns of the log file to log\n self.verbose=verbose\n if df_old_log is None and (infile_name_old_log is not None):\n self.process_file_name()\n elif df_old_log is not None:\n self.process_df()\n\n def process_file_name(self,):\n # if the file exists\n if os.path.isfile(self.infile_name_old_log):\n if self.verbose:\n print('The file {} loading...'.format(self.infile_name_old_log))\n if self.usecols is None:\n df_old = pd.read_excel(self.infile_name_old_log)\n else:\n df_old = pd.read_excel(self.infile_name_old_log, usecols=self.usecols)\n df_old = concat_columns(df_old, self.record_name, self.records)\n if self.sort_bys is None:\n df_old.to_excel(self.outfile_name_updated_log, index=False)\n else:\n df_old.sort_values(by=self.sort_bys, inplace=False, ascending=False).to_excel(\n self.outfile_name_updated_log, index=False)\n # if the file does not exist\n else:\n df_old = pd.DataFrame()\n for p, v in zip(self.record_name, self.records):\n df_old[p] = list([v])\n if self.sort_bys is None:\n df_old.to_excel(self.outfile_name_updated_log, index=False)\n else:\n df_old.sort_values(by=self.sort_bys, inplace=False, ascending=False).to_excel(\n self.outfile_name_updated_log, index=False)\n if self.verbose:\n print('The file {} created...'.format(self.infile_name_old_log))\n\n \"\"\" \n if self.sheet_name is None:\n if self.sort_bys is None:\n dumdf = concat_columns(df_old, self.record_name, self.records)\n dumdf.to_excel(self.outfile_name_updated_log, index=False)\n else:\n dumdf = concat_columns(df_old, self.record_name, self.records)\n dumdf.sort_values(by=self.sort_bys, inplace=False, ascending=False).to_excel(self.outfile_name_updated_log, index=False)\n else:\n if self.sort_bys is None:\n # dumdf = concat_columns(df_old, self.record_name, self.records)\n df_old.to_excel(self.outfile_name_updated_log, index=False, sheet_name=self.sheet_name)\n else:\n df_old.sort_values(by=self.sort_bys, inplace=True, ascending=False).to_excel(self.outfile_name_updated_log, index=False, sheet_name=self.sheet_name)\n \"\"\"\n\n def process_df(self,):\n if self.sheet_name is None:\n if self.sort_bys is None:\n concat_columns(self.df_old_log, self.record_name, self.records).to_excel(self.outfile_name_updated_log, index=False)\n else:\n concat_columns(self.df_old_log, self.record_name, self.records).sort_values(by=self.sort_bys, inplace=True, ascending=True).to_excel(self.outfile_name_updated_log, index=False)\n else:\n if self.sort_bys is None:\n concat_columns(self.df_old_log, self.record_name, self.records).to_excel(self.outfile_name_updated_log, index=False, sheet_name=self.sheet_name)\n else:\n concat_columns(self.df_old_log, self.record_name, self.records).sort_values(by=self.sort_bys, inplace=True).to_excel(self.outfile_name_updated_log, index=False, sheet_name=self.sheet_name)\n\n# =========================================================================\n# =========================================================================\n# TODO: Grid searches\n# =========================================================================\n# =========================================================================\n\nclass GGridSearcher():\n def __init__(self, cmodel, Xtr=None, ytr=None, Xts=None, yts=None, clf=None, param_dict=None, verbose=False,\n m_type='classifier', make_reports=True, non_prediction=False, attribs=None, model_vars=None,\n current_model=None, newfile_Per=None, newfile_FI=None, 
newfile_Re=None, new_tree_png=None):\n if cmodel is not None:\n self.cmodel = cmodel\n self.Xtr = cmodel.X\n self.ytr = cmodel.y\n self.Xts = cmodel.Xts\n self.yts = cmodel.yts\n else:\n self.cmodel = cmodel\n self.Xtr = Xtr\n self.ytr = ytr\n self.Xts = Xts\n self.yts = yts\n self.clf=clf\n self.verbose=verbose\n self.m_type = m_type\n self.param_dict=param_dict\n self.make_reports = make_reports\n self.SkSVCparam_dict = None\n self.GLinRegparam_dict = None\n self.GClstrparam_dict = None\n self.SkKmuparam_dict = None\n self.SkMbKmuparam_dict = None\n self.SKRFparam_dict=None\n self.non_prediction = non_prediction\n self.attribs = attribs\n self.current_model=current_model\n self.model_vars=model_vars\n self.newfile_Per=newfile_Per\n self.newfile_FI=newfile_FI\n self.newfile_Re=newfile_Re\n self.new_tree_png=new_tree_png\n\n def set_clf(self,clf):\n self.clf=clf\n def set_param_grid(self,param_dict):\n self.param_dict=param_dict\n def set_verbose(self,verbose):\n self.verbose=verbose\n def get_clf(self, ):\n return self.clf\n def get_param_grid(self, ):\n return self.param_dict\n def get_verbose(self, verbose):\n return self.verbose\n\n def GO(self, report_dict, file=None, sortbys = None, sheet_name=None, usecols=None, ):\n \"\"\"\n Can be used to run a series of grid search runs for various algorithms.\n The different types are controled by the parameter self.clf\n currently the options are:\n * skleranSVC\n * sklearnKmu\n * sklearnRandomForest\n\n :param report_dict: a dictionary containing the performance results you want logged\n :param file: the file you would like to store the report log into\n :param sortbys: the columns you would like to sort the result log by if any\n :param sheet_name: the sheet name of the log file if any TODO: need to create an excel writer method so I can manipulate the sheets\n :param usecols: the columns of the model tested\n :return:\n \"\"\"\n file = self.newfile_Re\n if self.clf == 'sklearnSVC':\n from sklearn.svm import SVC\n self.SkSVCparam_dict = {'C': [1],\n 'kernel': ['rbf'], # kernel type used for algorithm\n 'degree': [3], # degree used for polynomial kernel, ignored by all others\n 'gamma': ['scale'], # scale (sigma) used in kernel\n 'coef0': [0], # bias for poly and sigmoid kernels\n 'shrk':[True], # whether to use shrinking huristic\n 'dsf':['ovr'], # use one-vs-rest or one vs one (ovo)\n 'cw':['balanced'], # weights of different classes (priors)\n 'prob':[False], # Whether to enable probability estimates.\n 'tol':[1e-3], # Tolerance for stopping criterion.\n 'max_it':[-1]} # max allowable iterations\n # change defaults to passed settings\n for pu in self.param_dict:\n if pu in self.SkSVCparam_dict:\n self.SkSVCparam_dict[pu] = self.param_dict[pu]\n # now do grid search\n C = self.SkSVCparam_dict['C']\n Krnl= self.SkSVCparam_dict['kernel']\n dgr = self.SkSVCparam_dict['degree']\n gma = self.SkSVCparam_dict['gamma']\n co = self.SkSVCparam_dict['coef0']\n shk = self.SkSVCparam_dict['shrk']\n dsf = self.SkSVCparam_dict['dsf']\n clsw = self.SkSVCparam_dict['cw']\n prob = self.SkSVCparam_dict['prob']\n tol = self.SkSVCparam_dict['tol']\n mxit = self.SkSVCparam_dict['max_it']\n for c in C:\n for k in Krnl:\n for mx in mxit:\n for s in shk:\n for df in dsf:\n for p in prob:\n for t in tol:\n for cw in clsw:\n if k in ['rbf', 'poly', 'sigmoid']:\n for g in gma:\n if k in ['poly', 'sigmoid']:\n for coef in co:\n if k is 'poly':\n for d in dgr:\n svc_clf = SVC(C=c, kernel=k, degree=d, gamma=g,\n coef0=coef, shrinking=s, probability=p,\n tol=t, 
class_weight=cw, max_iter=mx,\n decision_function_shape=df)\n strtm = time.time()\n svc_clf.fit(self.Xtr, self.ytr.values.flatten())\n trpast = time_past(strtm)\n yp = svc_clf.predict(self.Xts)\n acc, scr, posneg = bi_score(yp, self.yts, vals=[0,1], retpre=True)\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n report_dict['C'] = c\n report_dict['kernel'] = k\n report_dict['degree'] = d\n report_dict['gamma'] = g\n report_dict['coef0'] = coef\n report_dict['shrk'] = s\n report_dict['dsf'] = df\n report_dict['cw'] = cw\n report_dict['prob'] = p\n report_dict['tol'] = t\n report_dict['max_it'] = mx\n\n ResultsLog(report_dict,\n infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n else:\n svc_clf = SVC(C=c, kernel=k, gamma=g,\n coef0=coef, shrinking=s, probability=p,\n tol=t, class_weight=cw, max_iter=mx,\n decision_function_shape=df)\n strtm = time.time()\n svc_clf.fit(self.Xtr, self.ytr.values.flatten())\n trpast = time_past(strtm)\n yp = svc_clf.predict(self.Xts)\n acc, scr, posneg = bi_score(yp, self.yts, vals=[0, 1], retpre=True)\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n report_dict['C'] = c\n report_dict['kernel'] = k\n report_dict['degree'] = -1\n report_dict['gamma'] = g\n report_dict['coef0'] = coef\n report_dict['shrk'] = s\n report_dict['dsf'] = df\n report_dict['cw'] = cw\n report_dict['prob'] = p\n report_dict['tol'] = t\n report_dict['max_it'] = mx\n report_dict['time'] = trpast\n\n ResultsLog(report_dict,\n infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n else: # when rbf\n for g in gma:\n svc_clf = SVC(C=c, kernel=k, gamma=g,\n shrinking=s, probability=p,\n tol=t, class_weight=cw, max_iter=mx,\n decision_function_shape=df)\n strtm = time.time()\n svc_clf.fit(self.Xtr, self.ytr.values.flatten())\n trpast = time_past(strtm)\n yp = svc_clf.predict(self.Xts)\n acc, scr, posneg = bi_score(yp, self.yts, vals=[0, 1], retpre=True)\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n report_dict['Accuracy'] = acc\n report_dict['C'] = c\n report_dict['kernel'] = k\n report_dict['degree'] = -1\n report_dict['gamma'] = g\n report_dict['coef0'] = -1\n report_dict['shrk'] = s\n report_dict['dsf'] = df\n report_dict['cw'] = cw\n report_dict['prob'] = p\n report_dict['tol'] = t\n report_dict['max_it'] = mx\n report_dict['time'] = trpast\n\n ResultsLog(report_dict,\n infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n else: # if linear\n svc = SVC(C=c, kernel=k, )\n strtm = time.time()\n svc_clf = SVC(C=c, kernel=k, shrinking=s, probability=p,\n tol=t, class_weight=cw, max_iter=mx,\n decision_function_shape=df)\n svc_clf.fit(self.Xtr, self.ytr.values.flatten())\n trpast = time_past(strtm)\n yp = svc_clf.predict(self.Xts)\n acc, scr, posneg = bi_score(yp, self.yts, vals=[0, 1], retpre=True)\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n report_dict['C'] = c\n report_dict['kernel'] = k\n report_dict['degree'] = -1\n report_dict['gamma'] = -1\n report_dict['coef0'] = -1\n report_dict['shrk'] = s\n report_dict['dsf'] = df\n report_dict['cw'] = cw\n report_dict['prob'] = p\n report_dict['tol'] = t\n report_dict['max_it'] = mx\n report_dict['time'] = trpast\n\n ResultsLog(report_dict,\n 
infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n elif self.clf == 'sklearnKmu':\n from sklearn.cluster import KMeans as kmu\n self.SkKmuparam_dict = {'n_clusters':[2],\n 'init':['k-means++'],\n 'n_init':[10],\n 'max_iter':[300],\n 'tol':[1e-4],\n 'algorithm':['auto',]}\n # add user chosen test sets\n for pu in self.param_dict:\n if pu in self.SkKmuparam_dict:\n self.SkKmuparam_dict[pu] = self.param_dict[pu]\n # now do grid search\n n_clusters = self.SkKmuparam_dict['n_clusters']\n ini = self.SkKmuparam_dict['init']\n nini = self.SkKmuparam_dict['n_init']\n algo = self.SkKmuparam_dict['algorithm']\n tol = self.SkKmuparam_dict['tol']\n mxit = self.SkKmuparam_dict['max_iter']\n\n for ncl in n_clusters:\n for i in ini:\n for n in nini:\n for al in algo:\n for tl in tol:\n for mx in mxit:\n KM = kmu(n_clusters=ncl, init=i, algorithm=al, tol=tl, max_iter=mx, n_init=n)\n strtm = time.time()\n KM.fit(self.Xtr, self.ytr)\n trpast = time_past(strtm)\n yp = KM.predict(self.Xts)\n if ncl == 2:\n acc, scr, posneg = bi_score(yp, self.yts, vals=[0, 1], retpre=True)\n else:\n acc = metrics.accuracy_score(self.yts, yp)\n posneg = {}\n posneg['Sensitivity'] = -999\n posneg['Specificity'] = -999\n posneg['Precision'] = -999\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n report_dict['Accuracy'] = acc\n report_dict['Homogeneity'] = metrics.homogeneity_score(self.yts.values.flatten(), yp)\n report_dict['n_clusters'] = ncl\n report_dict['init'] = i\n report_dict['n_init'] = n\n report_dict['algorithm'] = al\n report_dict['tol'] = tl\n report_dict['max_iter'] = mx\n report_dict['time'] = trpast\n\n ResultsLog(report_dict,\n infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n elif self.clf == 'sklearnRandomForest':\n nruns = 1\n model_vars = self.model_vars\n current_model = self.current_model\n rl = self.attribs\n self.SKRFparam_dict = {\n 'n_estimators': [2200], # how many trees in forest\n 'max_features': [None], # maximum number of features to test for split\n 'criterion': ['entropy'], # how best split is decided\n 'max_depth': [None], # how large trees can grow\n 'oob_score': [True], #\n 'warm_start': [True],\n 'min_samples_leaf': [1], # The minimum number of samples required to be at a leaf node\n 'max_leaf_nodes': [None],\n 'min_weight_fraction_leaf': [0], #\n 'min_samples_split': [2],\n 'min_impurity_decrease': [0],\n 'random_state': [None],\n 'class_weight': [None],\n 'number of warm runs':1\n }\n for pu in self.param_dict:\n if pu in self.SKRFparam_dict:\n self.SKRFparam_dict[pu] = self.param_dict[pu]\n warm_start = self.SKRFparam_dict['warm_start']\n if warm_start:\n self.SKRFparam_dict['n_estimators'] = sorted(self.SKRFparam_dict['n_estimators'])\n nruns = self.SKRFparam_dict['number of warm runs']\n for ne in self.SKRFparam_dict['n_estimators']:\n for crit in self.SKRFparam_dict['criterion']:\n for mxd in self.SKRFparam_dict['max_depth']:\n for mln in self.SKRFparam_dict['max_leaf_nodes']:\n RF_clfstd = RandomForestClassifier(n_estimators=ne, criterion=crit, max_depth=mxd,\n warm_start=True, max_leaf_nodes=mln)\n best_estimator_fit_stime = time.time()\n for i in range(nruns):\n RF_clfstd.fit(self.Xtr, self.ytr)\n best_estimator_fit_etime = time.time() - best_estimator_fit_stime\n if self.verbose:\n print(\"Fitting the best one took {}\".format(best_estimator_fit_etime))\n feature_impz = 
RF_clfstd.feature_importances_\n testing_stime = time.time()\n ypz = RF_clfstd.predict(self.Xts)\n testing_etime = time.time() - testing_stime\n feates = display_significance(feature_impz, rl, verbose=True)\n scores0 = cross_val_score(RF_clfstd, self.Xts, self.yts, cv=2)\n avg_scr = scores0.mean()\n print('The Average score set {0}: {0}'.format(0, avg_scr))\n # score the models performance and show a confusion matrix for it\n accuracy, scores, posneg, = bi_score(ypz, self.yts, vals=[0, 1], classes='', retpre=True)\n nwim = self.new_tree_png\n tmpim = r'C:\\Users\\gjone\\DeepSolar_Code_Base\\tree.dot'\n if nwim is not None:\n print('creating')\n print(nwim)\n viz.display_DT(RF_clfstd.estimators_[0], rl, ['0','1'], newimg=nwim, tmpimg=tmpim,\n precision=2)\n\n # pd.DataFrame({'variables':list(feates.keys()), 'Sig':list(feates.values())}).to_excel('RandomForest_Feature_significance_18_{}_.xlsx'.format(get_current_date()))\n # TODO: below line store in generic time and date stamped file\n # generate_excel(dic=feates, name='RandomForest_Feature_significance_{}_.xlsx'.format(get_current_date()))\n\n if self.verbose:\n print(\n '=================================================================================================')\n print(\n '=================================================================================================')\n print(\n '=================================================================================================')\n print(\n '=================================================================================================')\n print(\n '=================================================================================================')\n print('Accuracy: {:.2f}'.format(accuracy))\n print('Cross val score: {:.3f}'.format(avg_scr))\n print('Sensitivity:', posneg['Sensitivity'])\n print('Specificity:', posneg['Specificity'])\n print('Precision:', posneg['Precision'])\n viz.show_performance(scores=scores, verbose=True)\n # print('Training/Testing Split {0}/{1}'.format(tr, ts))\n print('Training time {}'.format(best_estimator_fit_etime))\n print('Testing time {}'.format(testing_etime))\n print('Total time {}'.format(testing_etime + best_estimator_fit_etime))\n print('Model file ', current_model)\n print(\n '=================================================================================================')\n print(\n '=================================================================================================')\n # now save the results dummy\n params_re = {'Accuracy': accuracy,\n 'Cross_V2': avg_scr,\n 'Sensitivity': posneg['Sensitivity'],\n 'Precision': posneg['Precision'],\n 'Specificity': posneg['Specificity'], 'runs': 0,\n 'time': testing_etime + best_estimator_fit_etime}\n # TODO: fix this file and below as well\n #new_file = '__Data/__Mixed_models/policy/RF_Perf_{}_.xlsx'.format(\n # 'DeepSolar_Model_2019-12-30_mega')\n\n # store the log files if needed\n if self.make_reports:\n for r in posneg:\n if r in report_dict:\n report_dict[r] = posneg[r]\n\n report_dict['Accuracy'] = np.around(accuracy, 3)\n for r in self.SKRFparam_dict:\n report_dict[r] = self.SKRFparam_dict[r]\n\n ResultsLog(report_dict,\n infile_name_old_log=file,\n outfile_name_updated_log=file,\n sheet_name=sheet_name,\n usecols=usecols,\n sort_bys=sortbys)\n if self.newfile_Per is not None:\n pandas_excel_maker(self.newfile_Per, params_re, mode='performance')\n # RF_FI = 'RF_FI_{}_.xlsx'.format('DeepSolar_Model_2019-12-30_mega'+'_tc{}xc{}tr{}ts{}')\n # 
pandas_excel_maker('__Data/__Mixed_models/policy/RF_FI_{}_.xlsx'.format(model_vars),\n if self.newfile_FI is not None:\n pandas_excel_maker(self.newfile_FI,\n params=feates)\n\ndef display_significance(feature_sig, features, verbose=False):\n rd = {}\n for s, f in zip(feature_sig, features):\n rd[f] = s\n\n sorted_rd = dict(sorted(rd.items(), key=operator.itemgetter(1), reverse=True))\n if verbose:\n display_dic(sorted_rd)\n return sorted_rd\n\ndef GJ_sklearn_train_test(df, target, trsz=.50, cv=2, rl=None, verbose=True):\n\n if rl is None:\n rl = rmv_list(df.columns.values.tolist(), target)\n\n # targets0 = df[target].values.flatten()\n targets0 = df.loc[:, target].values.tolist()\n targets0 = [x[0] for x in targets0]\n print(targets0)\n df = df.loc[:, rl]\n if verbose:\n print(df.describe())\n print()\n ts = .50\n tr = 1 - ts\n # Create training and testing sets for the data\n X_train0, X_test0, y_train0, y_test0 = train_test_split(df, targets0, stratify=targets0, test_size=ts,\n train_size=tr)\n return (X_train0, y_train0), (X_test0, y_test0)\n\ndef get_suggested_eta(N, denom=12):\n return N/denom\n\ndef get_suggested_perp(N, pct=.01):\n return N * pct\n\ndef performance_logger(performance_dict, log_file, verbose=False):\n \"\"\"\n will store the performance results of some form of testing\n :param performance_dict: dictionary where keys are the metric/parameter, and vals are results\n :param log_file: the file name you want to use to store the results\n :param verbose: how much of the process you want displayed to std out\n :return: None\n \"\"\"\n # check for file and if not found make it\n\ndef process_grid_input():\n lcnq = input(\"perform lcn?: y/n\")\n if lcnq.lower() == 'y':\n lcn_reduce = True # want to reduce it by correlation filtering?\n gmtc = int(input('minimum target correlation? (-1) for none: '))\n gmxcc = int(input('maximmum predictor cross correlation? (2) for none: '))\n else:\n lcn_reduce = False # want to reduce it by correlation filtering?\n use_full = input('Use the full model (y) or a select predictor set (n)? (y/n): ')\n if use_full.lower() == 'n':\n use_full = False\n usecols = input('Give me the name of the attrib file: ')\n usecols = pd.read_excel(usecols)['variable'].values.tolist()\n else:\n use_full = True # do you want to use the full model or select features\n usecols = None # if allowed to be none will use the drops list\n tssp = float(input('validation set percentage (ex. 
.50): '))\n current_model = input('Give me the name or path to the model file: ') # the model file to load\n scaler_ty = 'None'\n #s = 0\n #cv = 3\n #n_est = 2200\n #crit = 'entropy'\n #mx_dth = 20\n #print_tree = True\n\ndef load_tree_trunc_features(df=None, dffile=None, limit=.00, verbose=False):\n if df is None:\n df = pd.read_excel(dffile, usecols=['Variable', 'Imp_trunc'])\n\n df = df.loc[df['Imp_trunc'] >= limit, 'Variable']\n print(list(df))\n return list(df)\n\n\ndef forward_sub2(Train_data, Test_data, feats, clf, verbose=True):\n \"\"\"performs forward substitution dimension reduction\n :param Train_data: list for X,y of training data\n :param Test_data: list for X,y of testing data\n :param feats: features to test\n :param clf: the classifiery to test, must have a fit method\n :param verbose:\n :return: the list of all vars that lead to increase in performance\n \"\"\"\n # set up vars\n # need a used up list\n from _products.performance_metrics import calculate_vif, calculate_log_like\n best_scr, BRsqr = 0, 0\n used, good, goodR2, current = list(), list(), list(), list()\n best_R2, BRacc = 0, 0,\n tvar = list(feats[:])\n Rtvar = list(feats[:])\n cadd = None\n better_score = True\n # go through checking each variable one by one\n # subing in values\n while better_score:\n better_score = False\n cadd = None\n # go through each of the remaining vars\n # looking for best result, and adding the one that leads to this\n for var in tvar:\n if var not in good:\n current = good + [var]\n v = clf.fit(Train_data[0].loc[:,current ], Train_data[1])\n\n if v is not None:\n print('NEED TO HANDLE THE ISSUE')\n continue\n # tr_scr = cross_val_score(clf, Train_data[0].loc[:, current], Train_data[1], cv=2).mean()\n ts_scr = clf.score(Test_data[0].loc[:,current], Test_data[1])\n Rsqr = clf.get_Macfadden()\n if verbose:\n pass\n #print('current:')\n #print(current)\n #print()\n #print('p-value of {}'.format(var))\n #print(clf.fitted_model.pvalues[var])\n #if len(current) > 1:\n # vif = calculate_vif(Train_data[0].loc[:,current ])\n # print('VIF:\\n', vif)\n #print('# ################################################3')\n #print('# ################################################3')\n #print('Anova: ')\n #print(clf.fitted_model.summary())\n #print('# ################################################3')\n #print('# ################################################3')\n if ts_scr > best_scr:\n if clf.fitted_model.pvalues[var] < .055:\n better_score=True\n print(' ****************** p value {:.3f}'.format(clf.fitted_model.pvalues[var]))\n print(' ****************** New best from {} of {}'.format(var, ts_scr))\n print(' ****************** Rsquare of {}'.format(Rsqr))\n best_scr = ts_scr\n best_R2 = Rsqr\n cadd = [var]\n if cadd is None:\n print('Good Accuracy list, score: {:.3f}'.format(best_scr))\n print(good)\n #sound_alert_file('sounds/this_town_needs.wav')\n break\n good += cadd\n print('Good is now: score: {}'.format(best_R2))\n #print(good)\n tvar = rmv_list(tvar, cadd[0])\n\n current = list()\n better_score = True\n while better_score:\n better_score = False\n radd = None\n for var in Rtvar:\n if var not in goodR2:\n current = goodR2 + [var]\n if verbose:\n pass\n #print('current list to test:')\n #print(current)\n #print()\n clf.fit(Train_data[0].loc[:, current], Train_data[1])\n ts_scr = clf.score(Test_data[0].loc[:, current], Test_data[1])\n # tr_scr = cross_val_score(clf, Train_data[0].loc[:, current], Train_data[1], cv=2).mean()\n if verbose:\n pass\n #if len(current):\n # vif = 
calculate_vif(Train_data[0].loc[:,current ])\n # print('VIF:\\n', vif)\n #print('# ################################################3')\n #print('# ################################################3')\n #print('Anova: ')\n #print(clf.fitted_model.summary())\n #print('p-values')\n #print(clf.fitted_model.pvalues[var])\n #print('# ################################################3')\n #print('# ################################################3')\n Rsqr = clf.get_Macfadden()\n if Rsqr > BRsqr and clf.fitted_model.pvalues[var] < .055:\n # check for significance of model\n print(clf.fitted_model.pvalues)\n better_score=True\n print(' ****************** New best Rsqr {} of {}'.format(var, Rsqr))\n print(' ****************** Accuracy of {}'.format(ts_scr))\n print(' ****************** pvalue {:.3f}'.format(clf.fitted_model.pvalues[var]))\n BRsqr = Rsqr\n BRacc = ts_scr\n radd = [var]\n\n if radd is None:\n better_score=False\n print('Best list for R squared')\n print(goodR2)\n print('Anova: ')\n print(clf.fitted_model.summary())\n sound_alert_file('sounds/this_town_needs.wav')\n break\n goodR2 += radd\n print('GoodR2 is now:')\n print(goodR2)\n Rtvar = rmv_list(Rtvar, radd[0])\n return good, goodR2, [best_scr, best_R2], [BRsqr, BRacc]\n\n\ndef forward_sub(Train_data, feats, clf, cv=2, verbose=False):\n \"\"\"performs forward substitution dimension reduction\n :param Train_data: list for X,y of training data\n :param Test_data: list for X,y of testing data\n :param feats: features to test\n :param clf: the classifiery to test, must have a fit method\n :param verbose:\n :return: the list of all vars that lead to increase in performance\n \"\"\"\n # set up vars\n # need a used up list\n best_scr = 0\n used, good, current, acc_l = list(), list(), list(), list()\n acc_inc, best_l = list(), list()\n tvar = list(feats[:])\n cadd = None\n better_score = True\n # go through checking each variable one by one\n # subing in values\n while better_score:\n better_score = False\n cadd = None\n better_score = True\n #best_scr = 0\n # go through each of the remaining vars\n # looking for best result, and adding the one that leads to this\n for var in tvar:\n if var not in good:\n current = good + [var]\n if verbose:\n print('current:')\n print(current)\n print()\n # clf.fit(Train_data[0].loc[:,current ], Train_data[1])\n tr_scr = cross_val_score(clf, Train_data[0].loc[:, current], Train_data[1], cv=cv).mean()\n if tr_scr > best_scr:\n better_score=True\n print(' ****************** New best test from {} of {}'.format(var, tr_scr))\n best_scr = tr_scr\n cadd = [var]\n\n if cadd is None:\n print('returning list')\n print(good)\n sound_alert_file('sounds/this_town_needs.wav')\n return good, best_scr, acc_l, acc_inc\n acc_l.append(best_scr)\n if len(good) == 0:\n acc_inc.append(best_scr)\n else:\n acc_inc.append(best_scr - acc_inc[-1])\n good += cadd\n print('Score: {}, Good is now:'.format(best_scr))\n print(good)\n tvar = rmv_list(tvar, cadd[0])\n # print(tvar)\n return good, best_scr, acc_l, acc_inc\n\n\n" }, { "alpha_fraction": 0.5348714590072632, "alphanum_fraction": 0.5421645045280457, "avg_line_length": 35.12261962890625, "blob_id": "16ec3be4ac482943ffd97f0e763e12838e502d56", "content_id": "94cdb78f32e06f24711bdeaba78c2b1ae39a3074", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43603, "license_type": "no_license", "max_line_length": 144, "num_lines": 1207, "path": "/utility_fnc.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", 
"text": "from __future__ import print_function\nfrom abc import ABC, abstractmethod\nimport sys\nimport os\nimport string\nimport gzip\nimport shutil\nimport struct\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport array\nimport time\nfrom math import *\nimport operator\nfrom _products.DeepSolarModels import *\nfrom scipy.stats import truncnorm\npd.options.mode.use_inf_as_na = True\nfrom sklearn.linear_model import LogisticRegressionCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import FastICA\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold,cross_val_score, train_test_split\nfrom sklearn.metrics import roc_curve, precision_recall_curve, auc, make_scorer, recall_score, accuracy_score, precision_score, confusion_matrix\nimport sys\n\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\ndef stderr(msg, sep=' ', msg_num=-99):\n eprint(msg, sep=sep)\n quit(msg_num)\n\n\n# =========================================================\n# =========================================================\n# TODO: Generic Methods\n# =========================================================\n# =========================================================\ndef type_check(tocheck, against='dataframe'):\n \"\"\"\n will return true or false based of if the given object is of the same type as the against string argument\n :param tocheck: object to check\n :param against: type of object you want to see if it matches options are:\n * dataframe\n * string\n * float\n * int\n * numpy for numpy array\n * list\n * dict\n :return: boolean\n \"\"\"\n if against == 'dataframe':\n return type(tocheck) == type(pd.DataFrame([0]))\n elif against == 'string':\n return type(tocheck) == type(str('s'))\n elif against == 'float':\n return type(tocheck) == type(float(0))\n elif against == 'dict':\n return type(tocheck) == type(dict())\n elif against == 'numpy':\n return type(tocheck) == type(np.array([0]))\n elif against == 'int':\n return type(tocheck) == type(int(0))\n elif against == 'list':\n return type(tocheck) == type(list())\n\n# =========================================================\n# =========================================================\n# TODO: Dictionary Methods\n# =========================================================\n# =========================================================\n\ndef safe_dict_list_append(dictp, key, initl, next_step, next_p):\n dict_epoch(dictp, key, initl, next_step=next_step, next_p=next_p)\n return\n\ndef dict_epoch(dictp, key, initl, next_step, next_p):\n if key not in dictp:\n dictp[key] = initl\n next_step(dictp, next_p)\n return\n\ndef dict_list_append(dictp, key_val):\n dictp[key_val[0]].append(key_val[1])\n return dictp\n\ndef sort_dict(dic, sort_by='vals', reverse=False):\n \"\"\"\n Returns a sorted version of the given dictionary\n :param dic: dictionary to sort\n :param sort_by: 'vals' to sort by values, 'keys', to sort by keys\n :param reverse: set to to True to get largest to smallest\n :return:\n \"\"\"\n if sort_by == 'vals':\n return dict(sorted(dic.items(), key=operator.itemgetter(1), reverse=reverse))\n elif sort_by == 'keys':\n return dict(sorted(dic.items(), key=operator.itemgetter(0), reverse=reverse))\n\ndef display_dic2lvl(dic):\n for k in dic:\n print('------- {0} ------:'.format(k))\n display_dic(dic[k])\n print()\n\ndef display_dic(dic):\n for k in dic:\n print('{0}:'.format(k))\n print('--- 
{0}'.format(dic[k]))\n# =========================================================\n# =========================================================\n# TODO: List Methods\n# =========================================================\ndef rmv_list(l, r):\n del l[l.index(r)]\n return l\n\ndef rmv_list_list(l, rl):\n for r in rl:\n l = rmv_list(l, r)\n return l\n\ndef show_list(x):\n cnt = 1\n for l in x:\n print('{}: {}'.format(cnt, l))\n cnt += 1\n return\n\ndef show_labeled_list(x, labels):\n for l,label in zip(x, labels):\n print('{:s}: {}'.format(label, l))\n return\n\ndef make_repeated_list(n, v):\n return list([v]*n)\n\n# =========================================================\n# =========================================================\n# =========================================================\n# TODO: pandas methods\n# =========================================================\n# =========================================================\ndef fix_dataset(dataset, option=1):\n if option == 1:\n dataset.replace(-999, np.NaN)\n return dataset.dropna(axis=0)\n\ndef select_model(filename, attrib_file=None, attrib_list=None):\n '''Allows user to select from a data set only those variables they are interested in\n :param filename: data file that holds the larger set\n :param attrib_file: if desired can use an excel file to load the desired variables\n Save a list of variables into an xlsx file with a column header\n of Variables. Then pass the name of this file. The list will be\n used to build the model. Will throuw an error if the data file\n does not have column headers for these variables. If only passed\n the name of works just like a regular pandas read from excel or csv\n Looks for an xlsx or csv file.\n :param attrib_list: A list of the variables desired in model\n :return: a data frame of the data with only the desired variables\n '''\n if filename[-4:] == '.csv':\n if attrib_file is None and attrib_list is None:\n return pd.read_csv(filename)\n else:\n df = process_csv(filename=filename, attrib_file=attrib_file, attrib_list=attrib_list)\n elif filename[-5:] == '.xlsx':\n if attrib_file is None and attrib_list is None:\n return pd.read_excel(filename)\n else:\n return process_xlsx(filename=filename, attrib_file=attrib_file, attrib_list=attrib_list)\n\ndef process_csv(filename, attrib_file, attrib_list=None):\n if attrib_file is not None:\n attribs = pd.read_excel(attrib_file).loc['Variables'].values.tolist()\n return pd.read_csv(filename, usecols=attribs)\n elif attrib_list is not None:\n return pd.read_csv(filename, usecols=attrib_list)\n\ndef process_xlsx(filename, attrib_file, attrib_list=None):\n if attrib_file is not None:\n attribs = pd.read_excel(attrib_file).loc[:,'Variables'].values.tolist()\n print(attribs)\n return pd.read_excel(filename, usecols=attribs)\n elif attrib_list is not None:\n return pd.read_excel(filename, usecols=attrib_list)\n\ndef create_variable_file(df, new_name):\n \"\"\"\n This can be used to store the attributes of a selected model in an excel file\n Then the file can be used to select the those variables again for a training model\n :param df: dataframe of current model\n :param new_name: name of variable file\n :return:\n \"\"\"\n attribs = df.columns.values.tolist()\n pd.DataFrame({'Variables':attribs}).to_excel(new_name)\n return\n\ndef data_merger2(data_sets, joins=('fips', 'FIPS', 'geoid'), target=None, verbose=False, drop_joins=False,):\n \"\"\"This method can be used to merge a set of data frames using a shared\n data column. 
the first argument is a list of the dataframes to merge\n and the second argument is a list of the column labels used to perform the merge\n TODO: some work needs to be done for error checking\n TODO: add more flexibility in how the merge is perfomed\n TODO: make sure the copy rows are removed\n :param data_sets: a list of data frames of the data sets that are to be joined\n :param joins: a list of the column labels used to merge, the labels should be in the s\n same order as the data frames for the method to work. Right now this works\n best if the label used is the same for all. This makes sured the duplicate\n columns are not created.\n :param verbose: at this point does nothing but can be used to inform user of what\n has occured\n :return: a reference to the new merged dataframe\n \"\"\"\n\n cnt = 0\n for df in range(1,len(data_sets)):\n data_sets[0] = data_sets[0].merge(data_sets[df], left_on=joins[0], right_on=joins[df], how='left')\n if verbose:\n print(data_sets[0].columns)\n\n if drop_joins and ((joins[0] + '_x') in data_sets[0].columns.values.tolist() or (\n (joins[0] + '_y') in data_sets[0].columns.values.tolist()) or ((joins[1] + '_y') in data_sets[0].columns.values.tolist())):\n pass\n #data_sets[0].drop(columns=[(joins[0]+'_y'), (joins[0]+'_y')], inplace=True)\n if (target is not None and ((target + '_x') in data_sets[0].columns.values.tolist() or (\n (target + '_y') in data_sets[0].columns.values.tolist()))):\n data_sets[0][target] = data_sets[0].loc[:, target + '_x']\n data_sets[0].drop(columns=[(target + '_x'), (target + '_y')], inplace=True)\n if drop_joins:\n data_sets[0].drop(columns=list(joins), inplace=True)\n return data_sets[0]\n\n\n\ndef percentage_generator(df, part, total, newvar=None):\n \"\"\" will calculate the percentage\n of the total value part is in a list\n :param df:\n :param part:\n :param total:\n :return:\n \"\"\"\n if newvar is None:\n return list(df[part]/df[total])\n df[newvar] = list(df[part]/df[total])\n return\n\ndef check_cols(col1, col2):\n \"\"\"\n Takes two dictionaries and based on the columns of some data frame and\n checks to see if the column headers are the same\n :param col1:\n :param col2:\n :return:\n \"\"\"\n col1, col2 = col1.keys(), col2.keys()\n if len(col1) != len(col2):\n print('the length of the two lists do not match 1: {}, 2: {}'.format(len(col1), len(col2)))\n return 420\n cl = list()\n cnt = 0\n for f1, f2 in zip(col1, col2):\n if f1 != f2:\n cl.append((cnt,f1,f2))\n cnt += 1\n if len(cl) == 0:\n\n print('The two lists match.' 
)\n return 0\n print('The two lists do not match!!')\n print(\"Below is a list of the number of the non matching columns\\nwith the index and values\")\n print(cl, '\\nwas not found in both lists')\n return 1, cl\n\ndef report_var_stats(df, name, saveit=True, sort_type=None, sort_list=[], ascending=True, axis=0,\n re_nan=(-999,), verbose=False):\n \"\"\"Creates an excel file containing:\n * missing counts for each variable\n * the range for each variable\n * mean for each variable\n * standard deviation for each variable\n * the range for each variable\n * TODO: need to add skew to table\n :param df: The data frame containing the data to add to report\n :param name: The name of the new file\n :param saveit: if true report will be saved under given name\n :param sort_type: options are:\n * 'index' for row sorting\n * 'columns' for column sorting\n * None (default) for no sorting\n :param sort_list: the list of columns or indices to sort by, empty will just do lex sort\n :return: returns the newly created data frame used\n \"\"\"\n # add given list of nan representations\n for re in re_nan:\n df.replace(re, np.nan)\n # grab stat statistices\n descr = df.describe()\n # grab total number of entries\n if verbose:\n print('------- There are {:d} entries in the set -------')\n N = len(df)\n # set the indices to that of the given data frame dummy\n dfskew = df.skew()\n print('skew index\\n',dfskew.index)\n print('given df index\\n',df.index)\n rdic = {'Missing':[], 'Range':[], 'Mean':[], 'std':[], 'Skew':[]}\n #rdic = {'Missing':[], 'Range':[], 'Mean':[], 'std':[]}\n for var in descr.columns.values.tolist():\n rdic['Missing'].append(N-descr.loc['count',var])\n rdic['Range'].append([np.around(descr.loc['min', var], 4), np.around(descr.loc['max', var],4)])\n rdic['Mean'].append(descr.loc['mean',var])\n rdic['std'].append(descr.loc['std',var])\n rdic['Skew'].append(dfskew.loc[var])\n # create data from from created dictionary\n rdf = pd.DataFrame(rdic, index=descr.columns.values.tolist())\n if sort_type is not None:\n if sort_type == 'value':\n rdf.sort_values(by=sort_list, axis=axis, inplace=True, ascending=ascending)\n elif sort_type == 'index':\n rdf.sort_index(axis=axis, inplace=True, ascending=ascending)\n if saveit:\n rdf.to_excel(name)\n return rdf\n\ndef concat_columns(df, cols, datas, verbose=False):\n rdf = {}\n for col, data in zip(cols, datas):\n if verbose:\n print('col',col)\n print('data',data)\n rdf[col] = df[col].values.tolist()\n rdf[col].append(data)\n rdfdf = pd.DataFrame(rdf)\n if verbose:\n print('return df', rdf)\n return rdfdf\n\ndef concat_col(df, col, data, verbose=False):\n ldf = df[col].values.tolist()\n dl = [data]\n if verbose:\n print('data frame \\n', ldf,'\\ndata\\n', dl)\n return ldf + dl\n\ndef data_merger(data_sets, joins=('fips', 'FIPS', 'geoid'), target=None, verbose=False, drop_joins=False,):\n \"\"\"This method can be used to merge a set of data frames using a shared\n data column. the first argument is a list of the dataframes to merge\n and the second argument is a list of the column labels used to perform the merge\n TODO: some work needs to be done for error checking\n TODO: add more flexibility in how the merge is perfomed\n TODO: make sure the copy rows are removed\n :param data_sets: a list of data frames of the data sets that are to be joined\n :param joins: a list of the column labels used to merge, the labels should be in the s\n same order as the data frames for the method to work. Right now this works\n best if the label used is the same for all. 
This makes sured the duplicate\n columns are not created.\n :param verbose: at this point does nothing but can be used to inform user of what\n has occured\n :return: a reference to the new merged dataframe\n \"\"\"\n\n cnt = 0\n if len(data_sets) == 1:\n return data_sets[0]\n for df in range(1,len(data_sets)):\n data_sets[0] = data_sets[0].merge(data_sets[df], left_on=joins[0], right_on=joins[df], how='left')\n if verbose:\n print(data_sets[0].columns)\n\n if (joins[0] + '_x') in data_sets[0].columns.values.tolist() or (\n (joins[0] + '_y') in data_sets[0].columns.values.tolist()):\n data_sets[0].drop(columns=[(joins[0]+'_x'), (joins[1]+'_y')], inplace=True)\n if target is not None and ((target + '_x') in data_sets[0].columns.values.tolist() or (\n (target + '_y') in data_sets[0].columns.values.tolist())):\n data_sets[0][target] = data_sets[0].loc[:, target + '_x']\n data_sets[0].drop(columns=[(target + '_x'), (target + '_y')], inplace=True)\n if drop_joins:\n data_sets[0].drop(columns=list(joins), inplace=True)\n return data_sets[0]\n\ndef generate_excel_descending_list_dic(dic, headers):\n h1l, h2l = list(), list()\n for h1 in dic:\n h1l.append(h1)\n h2l.append(dic[h1])\n return {headers[0]:h1l,\n headers[1]:h2l}\n\ndef generate_excel(dic=None, df=None, name='df_excel2.xlsx', index=False):\n \"\"\"\n The method will generate an excel file of the given name from either a given dictionary\n or a given data fram\n :param dic: a dictionary that will be converted to an data\n frame and then writen to an excel file\n :param df: data frame to write to file\n :param name:\n :return:\n \"\"\"\n if dic is None:\n df.to_excel(name, index=index)\n else:\n df = pd.DataFrame(dic)\n df.to_excel(name, index=index)\n return\n\ndef create_combo_var_sum(df, list_to_sum, newvar=None):\n if newvar is None:\n return df.loc[:, list_to_sum].sum(axis=1).values.tolist()\n df[newvar] = df.loc[:, list_to_sum].sum(axis=1).values.tolist()\n return\n\ndef add_renewable_gen(df, val, dictl):\n df['Ren'] = list([0]*len(df))\n for st in dictl:\n df.loc[df[val] == st, 'Ren'] = dictl[st]\n #return df\n return\n\ndef store_var_ranges(df, vars):\n \"\"\"\n takes a data frame and the variables you want to find the ranges of and returns a new data frame\n with one column of the variables and the other the corresponding varialbes range\n :param df:\n :param vars:\n :return:\n \"\"\"\n var_stats = df.describe()\n var = list()\n ranges = list()\n for v in vars:\n var.append(v)\n ranges.append('[{0}, {1}]'.format(var_stats.loc['min', v], var_stats.loc['max', v]))\n return pd.DataFrame({'Variable':var, 'Original Range':ranges})\n\n\ndef recode_var_sub(sought, check, keyd):\n \"\"\"\n will create a list of recoded variables based on a list of substrings(sought) that will be\n searched for in the check list, useing the recode map keyd\n :param sought:\n :param check:\n :param keyd:\n :return:\n \"\"\"\n rl = list()\n for c in check:\n for substr in sought:\n print(substr)\n print(c)\n if pd.isna(c):\n print('bad c!',c)\n rl.append(np.nan)\n break\n elif substr in c:\n print(c)\n print(substr)\n rl.append(keyd[substr])\n break\n return rl\n\ndef load_model_attribs(filename, colname='Variables'):\n \"\"\"\n Loads a set of features from a given excel file\n :param filename:\n :param colname:\n :return:\n \"\"\"\n return pd.read_excel(filename).loc[:,colname].values.tolist()\n\n\ndef thresh_binary_recode(df, var, valthresh=0):\n bin_re = list([0]*df.shape[0])\n print('new list is of size {}'.format(len(bin_re)))\n df[var + '_bin'] = 
bin_re\n df.loc[df[var] > valthresh, var + '_bin'] = 1\n\ndef generate_mixed(df, vars, mix_name):\n df[mix_name] = df[vars[0]].values.tolist()\n for v in range(1, len(vars)):\n df[mix_name] = (df[mix_name].values * df[vars[v]].values).tolist()\n\ndef shuffle_deck(deck):\n np.random.shuffle(deck.values)\n\n\n# =========================================================\n# =========================================================\n# TODO: list methods\n# =========================================================\n# =========================================================\n\n\n# =========================================================\n# =========================================================\n# TODO: I/O methods\n# =========================================================\n# =========================================================\ndef read_line_binary(f, b=1, ignore=None, stop=b'\\n'):\n ch = ''\n line = ''\n while ch != stop:\n ch = f.read(b).decode('utf-8')\n line += ch\n return line\n\ndef process_ppm(file, verbose=False):\n f = open(file, 'rb')\n #magic_number = f.read(1)\n magic_number = read_line_binary(f, b=1, stop='\\n').strip().split()\n second_line = read_line_binary(f, b=1, stop='\\n').strip().split()\n width = int(second_line[0])\n height = int(second_line[1])\n third_line = read_line_binary(f, b=1, stop='\\n').strip().split()\n max_val = int(third_line[0])\n data_samples = list()\n if verbose:\n print('magic number:', magic_number)\n print('width:', width)\n print('height:', height)\n print('Max value:',max_val)\n original_header = {'magic_number': magic_number[0],\n 'width':width,\n 'height':height,\n 'Max_value':max_val}\n for h in range(height*int(width)):\n sample = list([])\n for i in range(3):\n next = struct.unpack('B', f.read(1))\n if next in (b' ', b'\\t'):\n continue\n sample.append(next[0])\n data_samples.append(sample)\n\n return pd.DataFrame(data_samples, columns=['r', 'g', 'b'], dtype=np.int), original_header\n\ndef write_ppm(file, data, header_dict, verbose=False):\n \"\"\"This will write a ppm to the given file name/destination\n using the header_dict, to save the magic_number, width,\n height, max size and then the data (pandas data frame or numpy\n array) to save the pixels (red, green, blue) of the ppm\n :param file: the name or destination\\name that you want to save the new ppm as.\n must end in .ppm to work as one\n :param data: a pandas data frame or numpy array that stores the pixel rgb values\n in each row.\n :param header_dict: dictionary storing the following ppm header information\n header_dict['magic_number'] = P3 for ascii file and P6 for binary\n header_dict['width'] = number of columns of the ppm\n header_dict['height] = number of rows of the ppm\n header_dict['Max_size] = the maximum pixel value\n this is used to determing if there are 1\n byte r/g/b values (max <=256) or if 2 byte red, and green,\n and blue values(max > 256).\n :param verbose: used for debugging\n :return:\n \"\"\"\n mn = header_dict['magic_number']\n w = header_dict['width']\n h = header_dict['height']\n mx = header_dict['Max_value']\n # make a string for the header information\n ppm_header = f'{mn}\\n{w} {h}\\n{mx}\\n'\n\n f = open(file, 'wb')\n # write the header to file\n f.write(bytearray(ppm_header, 'utf-8'))\n\n cnt = 0\n print('length of values', len(data.values))\n for v in data.values:\n pckr = struct.pack('B', v[0])\n pckg = struct.pack('B', v[1])\n pckb = struct.pack('B', v[2])\n f.write(pckr)\n f.write(pckg)\n f.write(pckb)\n cnt += 1\n print('there were {} 
pixels written'.format(int(cnt)))\n f.close()\n\ndef write_ppm2(file, data, header_dict, verbose=False):\n import codecs\n mn = header_dict['magic_number']\n w = header_dict['width']\n h = header_dict['height']\n mx = header_dict['Max_value']\n # magic number\n ppm_header = f'P6\\n{w} {h}\\n{mx}\\n'\n sender = []\n for v in data.values.tolist():\n sender += v\n print(len(sender))\n image = array.array('B', sender)\n #f.write(bytearray(ppm_header, 'ascii'))\n #with codecs.open(file, 'w', 'utf-8-sig') as f:\n # f.write(ppm_header)\n # image.tofile(f)\n #quit(-171)\n #f = codecs.open(file, 'w', 'utf-8')\n f = open(file, 'wb')\n\n f.write(bytearray(ppm_header, 'ascii'))\n #f.write(bytes(str(header_dict['Max_value'])+'\\n', encoding='ascii'))\n cnt = 0\n print('length of values', len(data.values))\n for v in data.values:\n #f.write(bytes(str(' '), encoding='ascii'))\n #for p in v:\n pckr = struct.pack('B', v[0])\n pckg = struct.pack('B', v[1])\n pckb = struct.pack('B', v[2])\n\n #f.write(bytes(str(v[0]), encoding='utf-8'))\n #f.write(bytes(str(' '), encoding='ascii'))\n #f.write(bytes(str(v[1]), encoding='utf-8'))\n #f.write(bytes(str(' '), encoding='ascii'))\n #f.write(bytes(str(v[2]), encoding='utf-8'))\n f.write(pckr)\n #f.write(bytearray(str(v[0]), encoding='utf-8'))\n #f.write(bytes(str(' '), encoding='ascii'))\n f.write(pckg)\n #f.write(bytearray(str(v[1]), encoding='utf-8'))\n # f.write(bytes(str(' '), encoding='ascii'))\n f.write(pckb)\n #f.write(bytearray(str(v[2]), encoding='utf-8'))\n #f.write(bytes(str(' '), encoding='ascii'))\n #if cnt < len(data)-1:\n # f.write(bytes(str('\\n'), encoding='ascii'))\n #f.write(bytes(str(' '), encoding='ascii'))\n cnt += 1\n #if cnt%(int(header_dict['width'])) == 0:\n # f.write(bytes(str('\\n'), encoding='ascii'))\n #else:\n # pass\n #f.write(bytes(str(' '), encoding='ascii'))\n #f.write(bytes(str('\\n'), encoding='ascii'))\n print(cnt)\n f.close()\n\n\n\n# =========================================================\n# =========================================================\n# TODO: file manipulation and scripting methods\n# =========================================================\n# =========================================================\ndef test_runs(exe_file, numruns=1, r1=None, r2=None):\n if numruns is not None:\n for i in range(numruns):\n os.system('python {}'.format(exe_file))\n else:\n print('range')\n for i in range(r1,r2):\n print('python {}'.format(exe_file + ' ' + str(i)))\n os.system('python {}'.format(exe_file + ' '+ str(i)))\n\ndef find_dir_parent(path, run=0, filename=None, ret_val=1):\n \"\"\"\n Can be used to find the file name and parent directory of a given path\n :param path:\n :param run:\n :param filename:\n :param ret_val:\n :return:\n \"\"\"\n for i in range(-1, -len(path), -1):\n if path[i] == '/':\n if run != ret_val:\n return find_dir_parent(path[:i], run=run+1, filename=path[i+1:], ret_val=ret_val)\n else:\n return (filename, path[i+1:])\n return (path)\n\n\ndef dir_maker(dir_name):\n \"\"\"\n Can be used to check for and if needed create a directory\n :param dir_name:\n :return:\n \"\"\"\n import os\n if os.path.isdir(dir_name):\n print(\"File exist\")\n return\n else:\n print(\"File not exist\")\n os.system('mkdir {}'.format(dir_name))\n return\n\ndef pandas_excel_maker(new_file, params, mode='feature', init=False):\n \"\"\"\n Can be used check for and create and excel file for data frames\n :param new_file:\n :param params:\n :param mode:\n :return:\n \"\"\"\n import os\n if os.path.isfile(new_file):\n 
print(\"File exist\")\n update_mode_handler(mode, new_file, params)\n return 0\n else:\n print(\"File not exist\")\n init_mode_handler(mode, new_file, list(params.keys()))\n update_mode_handler(mode, new_file, params)\n return 1\n\n\ndef init_mode_handler(mode, new_file, params):\n \"\"\"\n Will handle the initialization of a file\n :param mode:\n :param new_file:\n :param params:\n :return:\n \"\"\"\n if mode == 'feature':\n print('feature init')\n init_feat_importance(new_file, params)\n elif mode == 'performance':\n print('performance init')\n init_performance_log(new_file, params)\n\ndef update_mode_handler(mode, new_file, params):\n \"\"\"\n Will handle the updating of log files\n :param mode:\n :param new_file:\n :param params:\n :return:\n \"\"\"\n if mode == 'feature':\n print('feats')\n update_feat_importance(new_file, params)\n elif mode == 'performance':\n print('performance')\n update_performance_log(new_file, params)\n return\n\n\ndef init_feat_importance(filename, params):\n \"\"\"\n Used to initialize a feature importance average file\n hopefully will be used to get an average of the placement of a feature\n from sum number of runs and get a better feel for where the importance truly lies\n :param filename: new file to initilize\n :param params: list of the variables in the feature list\n :return:\n \"\"\"\n n_dict = init_params(params, ori='v')\n ps = list(n_dict.keys())\n vals = list(n_dict.values())\n run_count = make_repeated_list(len(params), 0)\n pd.DataFrame({'Variable':ps, 'Avg_Importance':vals, 'runs':run_count}).to_excel(filename, index=False)\n print(\"made the file {}?\".format(filename))\n return\ndef init_performance_log(filename, params):\n \"\"\"\n Used to initialize a performance log average file\n hopefully will be used to get an average of a set of test runs\n :param filename: new file to initilize\n :param params: list of metrics and parameters to store\n :return:\n \"\"\"\n n_dict = init_params(params, ori='h')\n print(n_dict)\n #ps = list(n_dict.keys())\n #vals = list(n_dict.values())\n #ps += 'runs'\n #vals += [0]\n pd.DataFrame(n_dict).to_excel(filename, index=False)\n return\n\ndef init_params(params, val=0, ori='h'):\n \"\"\" Used to initialize a dictionary with the params as keys and zeros as values\"\"\"\n n_dict = {}\n for p in params:\n if ori == 'h':\n n_dict[p] = [val]\n if ori == 'v':\n n_dict[p] = val\n return n_dict\n\ndef update_feat_importance(filename, params_updates):\n feature_file = pd.read_excel(filename, index_col='Variable')\n N = feature_file['runs'].values[0] +1\n replc_runs = list([N]* len(feature_file))\n feature_file['runs'] = list([N]* len(feature_file))\n if N > 1:\n N = 2\n print('Dividing by {}'.format(N))\n for p in params_updates:\n if p != 'runs':\n feature_file.loc[p, 'Avg_Importance'] = np.around((feature_file.loc[p, 'Avg_Importance'] + params_updates[p])/ N, 3)\n feature_file.sort_values(by=['Avg_Importance'], ascending=False).to_excel(filename, index=True)\n\n\ndef update_performance_log(filename, params_updates):\n feature_file = pd.read_excel(filename)\n N = feature_file.loc[0, 'runs'] + 1\n feature_file['runs'] = list([N] * len(feature_file))\n if N > 1:\n N = 2\n print('Dividing by {}'.format(N))\n for p in params_updates:\n print('params {}, val {}'.format(p, params_updates[p]))\n if p != 'runs':\n feature_file[p] = list((feature_file[p] + params_updates[p]) / N)\n #feature_file[p] = list((feature_file[p].values[0] + params_updates[p]) / N)\n feature_file.to_excel(filename, index=False)\n\n\ndef 
gen_RF_param_file(file, params=None, index=False):\n \"\"\"\n Generates a random forest parameter file that can be used to load a set of parameters\n into a random forest analysis\n :param file: name/name and path you want to save the file to\n :param params: parameter dictionary with the names and values (as a list) that you want to use\n :param index: determines if the index of the data frame will be saved\n :return: None\n \"\"\"\n param_grid0 = {\n 'n_estimators': [2200], # how many trees in forest\n 'max_features': [None], # maximum number of features to test for split\n 'criterion': ['entropy'], # how best split is decided\n 'max_depth': [None], # how large trees can grow\n 'oob_score': [True], #\n 'warm_start': [True],\n 'min_samples_leaf': [1], # The minimum number of samples required to be at a leaf node\n 'max_leaf_nodes': [None],\n 'min_weight_fraction_leaf': [0], #\n 'min_samples_split': [2],\n 'min_impurity_decrease': [0],\n 'random_state': [None],\n 'class_weight': [None]\n }\n if params is not None:\n for p in params:\n param_grid0[p] = params[p]\n pd.DataFrame(param_grid0).to_excel(file, index=index)\n return\n# =======================================================\n# =========================================================\n# TODO: Image work\n# =========================================================\n# =========================================================\n\n\n\n\n\n\n\n# =========================================================\n# =========================================================\n# TODO: timing and date methods\n# =========================================================\n# =========================================================\ndef today_is():\n \"\"\"this returns the current data and time\"\"\"\n return datetime.today().strftime('%Y-%m-%d-%H-%M-%S')\ndef get_current_date(time_only=True, type='s'):\n from datetime import datetime\n now = datetime.now()\n current_time = now.strftime(\"%H:%M:%S\")\n if time_only:\n if type == 's':\n return str(current_time).replace(':','_',5)\n elif type == 'n':\n return current_time\n return 'Current Time ={}'.format(current_time).replace(':',':',5)\ndef get_seconds_past(start):\n return time.time() - start\ndef get_minutes_past(start):\n return (time.time() - start)/60\ndef get_hours_past(start):\n return (time.time() - start)/(60*60)\ndef time_past(start):\n delta = time.time() - start\n if delta <= 60:\n return delta, 'seconds'\n elif delta > 60 and delta <= 60**2:\n return get_minutes_past(start), 'mins'\n else:\n return get_hours_past(start), 'hours'\ndef how_long(start, rtn=False):\n if rtn:\n return time_past(start)\n tm, mes = time_past\n return tm\n\n# =========================================================\n# =========================================================\n# TODO: audio cue methods\n# =========================================================\n# =========================================================\ndef sound_alert_beep(frq, dur):\n import winsound\n frequency = frq*1000 # Set Frequency To 2500 Hertz\n duration = dur*1000 # Set Duration To 1000 ms == 1 second\n winsound.Beep(frequency, duration)\n\ndef sound_alert_file(filename):\n import winsound\n winsound.PlaySound(filename, winsound.SND_FILENAME)\n\ndef sound_player_playsound(file):\n from playsound import playsound\n playsound(file)\n\ndef blocking_sound_player(filename):\n import sounddevice as sd\n import soundfile as sf\n # Extract data and sampling rate from file\n data, fs = sf.read(filename, dtype='float32')\n sd.play(data, fs)\n 
status = sd.wait() # Wait until file is done playing\n# =========================================================\n# =========================================================\n# TODO: Parrallel and Threading methods\n# =========================================================\n# =========================================================\ndef impt_mp():\n import multiprocessing as mp\n return mp\n\ndef get_pcount():\n mp = impt_mp()\n print(\"Number of processors: \", mp.cpu_count())\n\n# =========================================================\n# =========================================================\n# TODO: function methods\n# =========================================================\n# =========================================================\n\ndef gsummation(zipped, func):\n return sum([func(a,b) for a,b in zipped])\n\ndef Nx_gaussian(x, mu, sig, prior=1, verbose=False):\n \"\"\"Returns the result of a gaussian operation\n :param x: input sample\n :param mu: mean\n :param sig: std\n :param prior: prior probability\n :param verbose: nothing yet just a habit for debugging\n :return: the values of the gausian pdf operation\n \"\"\"\n return ((1 / (sig * sqrt(2 * np.pi)))) * np.exp((-(x - mu) ** 2) / (2 * sig ** 2))*prior\n\ndef generate_gaussian(xarray, mu, sig, prior=None, verbose=False):\n return [Nx_gaussian(x,mu,sig, prior, verbose=verbose) for x in xarray]\n\n# converts give aray to a rounded integer version\ndef get_rounded_int_array(dta):\n return np.array(np.around(dta, 0), dtype=np.int)\n\n# generates a gausian random number from the given statistics\ndef get_truncated_normal(mean=128, sd=1, low=0, upp=255):\n return truncnorm((low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)\n# =========================================================\n# =========================================================\n# TODO: GIS methods\n# =========================================================\n# =========================================================\n\ndef GIS_fips(old_fips_nums):\n \"\"\"\n This will take a list of fips numbers and append 0's\n to those that have 10 numbers for easy GIS manipulation\n :param old_fips_nums:\n :return:\n \"\"\"\n new_fips_nums = list()\n for fip in old_fips_nums:\n if len(str(fip)) == 10:\n # add a 0 to the front\n new_fips_nums.append('0' + str(fip))\n else:\n new_fips_nums.append(str(fip))\n return new_fips_nums\n\n\n\n\n\n\n\n# =========================================================\n# =========================================================\n# TODO: String manipulation methods\n# =========================================================\n# =========================================================\nLalpha_b = list(string.ascii_lowercase)\nUalpha_b = list(string.ascii_uppercase)\n\n# the probability table from the slides detailing\n# the probability of a char appearing in the english language\nenglish_1gram = pd.read_excel('english_1gram.xlsx')\nn_gramdict = {}\nfor char, val in zip(english_1gram['char'].values.tolist(), english_1gram['prob'].values.tolist()):\n n_gramdict[char] = val\n\n\n\n\nclass CeasarCipher:\n def __init__(self, msg=None, msgcoded=None, key=None):\n self.msg=msg\n self.msgcoded=msgcoded\n\n\n\n\ndef letter_index(c, chars=None):\n \"\"\"\n returns the index of the given char in the chars array.\n :param c:\n :param chars:\n :return:\n \"\"\"\n if chars is None:\n if c.isupper():\n chars = string.ascii_uppercase\n elif c.islower():\n chars = string.ascii_lowercase\n return chars.index(c)\n\ndef encode(msg, key, 
verbose=False):\n Lalpha_b = list(string.ascii_lowercase)\n Ualpha_b = list(string.ascii_uppercase)\n new_msg = ''\n msg = msg.split(' ')\n cnt = 0\n end = len(msg)\n for word in msg:\n nword = ''\n for c in word:\n idx = letter_index(c.upper(), Ualpha_b)\n shft = (idx + key) % len(Lalpha_b)\n replace = Lalpha_b[shft]\n if verbose:\n print('the char index is {}'.format(idx))\n print('the char shifted index is {}'.format(shft))\n print('the char is {} the shift is {} which gets {} in the chars:'.format(c, idx + key % len(\n Lalpha_b),\n replace))\n nword += replace\n new_msg += nword\n if cnt < end - 1:\n new_msg += ' '\n return new_msg\n\ndef get_shifted_alpha(chr, shift):\n if chr.isupper():\n return Ualpha_b[(letter_index(chr) + shift)%26]\n return Lalpha_b[(letter_index(chr) + shift)%26]\n\n\ndef dencode(msg, key, verbose=False):\n Lalpha_b = list(string.ascii_lowercase)\n Ualpha_b = list(string.ascii_uppercase)\n new_msg = ''\n msg = msg.split(' ')\n cnt = 0\n end = len(msg)\n for word in msg:\n nword=''\n for c in word:\n idx = letter_index(c.upper(), Ualpha_b)\n orig = (idx - key)%len(Lalpha_b)\n replace = Lalpha_b[orig]\n if verbose:\n print('the encoded char index is {}'.format(idx))\n print('the chars original index is {}'.format(orig))\n print('the char is {} the shift is {} which gets {} in the chars:'.format(c, idx+key%len(Lalpha_b),\n replace))\n nword += replace\n new_msg += nword\n if cnt < end-1:\n new_msg += ' '\n return new_msg.upper()\n\ndef vectorize_msg(msg):\n msg = msg.strip().split()\n for w in range(len(msg)):\n msg[w] = msg[w].strip()\n return msg\n\ndef check_msg(msg):\n if type(msg) == type('') or type(msg) != type(list()):\n msg = vectorize_msg(msg)\n return msg\n\ndef generate_f(msg, N, verbose=False):\n \"\"\"\n will generate what the class called 'f'\n which is the probability of each char in msg\n :param msg:\n :param verbose:\n :return:\n \"\"\"\n fdict, rdict = dict(), dict()\n # msg = msg.strip().split(' ')\n msg = check_msg(msg)\n show_list(msg)\n cnt_dict = msg_char_cnt(msg)\n print('coung dict')\n print(cnt_dict)\n f_prob = msg_char_prob(cnt_dict, N)\n #print(f_prob)\n return f_prob\n for w in range(len(msg)):\n\n for word in msg:\n print()\n\n\ndef msg_char_prob(cnt_dict, N):\n prob_dict = dict()\n for chr in cnt_dict:\n prob_dict[chr] = cnt_dict[chr]/N\n return prob_dict\n\ndef Msg_char_total(msg_vec):\n \"\"\"\n Can be used to count the number of chars in a given message vector\n :param msg_vec: a string or a vector of words representing a message\n :return:\n \"\"\"\n\n if type(msg_vec) == type(''):\n msg_vec = vectorize_msg(msg_vec)\n\n if type(msg_vec) != type(list()):\n stderr('ERROR: {}'.format('msg_vec must be a string or a list of strings'), msg_num=96)\n return sum([len(w) for w in msg_vec])\n\ndef msg_char_cnt(msg):\n cnt_dict = dict()\n for word in msg:\n word_char_cnt(word, cnt_dict)\n return cnt_dict\n\ndef word_char_cnt(word, cnt_dict=None):\n if cnt_dict is None:\n cnt_dict = dict()\n def add_one(dict, key):\n dict[key] += 1\n return\n for char in word:\n dict_epoch(cnt_dict, char, 0, add_one, char)\n return\n\ndef generate_key_prob(k_d, cnt_dict, ngramdict, k):\n #print('key: ', k)\n # ['key_val', 'count_dict_value', 'ngram_dict_value']\n def sumthem(k_d, ng):\n #print('key in sumthem 1112: {}'.format(ng[0]))\n #print('cnt dict val in sumthem : {}'.format(ng[1]))\n #print('ngram in sumthem: {}'.format(ng[2]))\n if ng[0] not in k_d:\n k_d[ng[0]] = 0\n k_d[ng[0]] += ng[1] * ng[2]\n for chr in cnt_dict:\n ngram_idx= 
get_shifted_alpha(chr, -k).lower()\n #print('ngram_idx', ngram_idx)\n dict_epoch(k_d, k, 0, sumthem, [k, cnt_dict[chr], ngramdict[ngram_idx]])\n k_d = sort_dict(k_d, reverse=True)\n\n# now sum up prob of key and sort by largest pick top four\ndef crack_ceasar(encoded_msg, ngram_prb=None, top_num=10):\n print('Original encoded message: ')\n print(encoded_msg)\n print('---------------------------------------------------')\n NN = Msg_char_total(encoded_msg)\n msg_cnt_dict = generate_f(encoded_msg, NN)\n print('there are {} chars in message'.format(NN))\n print('the mesage dict is:')\n print(msg_cnt_dict)\n prob_1 = pd.DataFrame({'letter':list(msg_cnt_dict.keys()),\n 'Frequency':list(msg_cnt_dict.values())}).to_excel('GTA_HomeWork_Prob1_tableB.xlsx')\n # store it for the Home work key\n\n if ngram_prb is None:\n ngram_prb= n_gramdict\n key_prob = dict()\n for key in range(1, 26):\n # print('key: ', key)\n #key_prob[key] = generate_key_prob(key_prob, msg_cnt_dict, ngram_prb, key)\n generate_key_prob(key_prob, msg_cnt_dict, ngram_prb, key)\n #print(key_prob)\n key_prob = sort_dict(key_prob, reverse=True)\n print('prob dict for possible keys:')\n print(key_prob)\n top_5pos = list(key_prob.keys())[:top_num]\n cnt = 0\n for ans in top_5pos:\n print('Possible key {}: {}'.format(cnt, ans))\n print(dencode(encoded_msg, ans))\n cnt += 1\n return\n\n\n\n" }, { "alpha_fraction": 0.5017082691192627, "alphanum_fraction": 0.5142356753349304, "avg_line_length": 44.030242919921875, "blob_id": "46add87387b6d638255b9dae35cadfdbe14ee987", "content_id": "4b98f9fa58c895b72584143656015d809aa53170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68490, "license_type": "no_license", "max_line_length": 150, "num_lines": 1521, "path": "/GJ_Networks.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sys\n# #####################################################\n# #####################################################\n# # TODO: Classes for Creating a Neural network ###\n# #####################################################\n# #####################################################\n\n# logic tables used to train networks\nor_tbl = np.array([[0,0,0],[0,1,1],[1,0,1],[1,1,1]]) # input table to train for an OR function\nand_tbl = np.array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]]) # input table to train for an AND function\nnot_tbl = np.array([[0,1],[1,1]]) # input table for NOT function\nxor_tbl = np.array([[0,0,0],[0,1,0],[1,0,0],[1,1,1]]) # input table to train for an XOR function\n\nslide_input = np.array([[.05, .10]])\nslide_output = np.array([[.01], [.99]])\nslide_H = np.array([[.15, .20],[.25, .3]])\nslide_o = np.array([[.4, .45],[.5, .55]])\ns_x = slide_input\ns_y = slide_output\n\n\n\n# the usual binary inputs to a truth table\n# just counting in binary from 0 to 3 for the four different\n# patterns of the binary inputs for the two inputs to the gates\nbin_in = or_tbl[:, [0,1]]\n\n# the expected outputs for the binary inputs of the different logic gates\nor_out = or_tbl[:, [2]]\nand_out = and_tbl[:, [2]]\nnot_out = not_tbl[:, [1]]\nxor_out = xor_tbl[:, [2]]\n\n\ndef handle_example(verbose=3):\n HLW = [[.15, .2], # h1\n [.25, .30]] # h2\n b1 = [.35, .35]\n\n OLW = [[.4, .45], # o1\n [.5, .55]] # o2\n b2 = [.6, .6]\n\n # input array\n input_size = 2\n # designate number of neurons in the hidden layer\n hidden_layers = 2\n # designate number of neurons in the put layer layer\n output_layers = 2\n # input 
array\n x0 = np.array([[.05, .1]])\n # output array\n yt = np.array([[.01], [.99]])\n eta1 = .5\n eta_min = .05\n epochs = 1\n kmax = int(epochs/.3)\n error1 = 'se'\n gnn = NeuralNetwork(input_size, number_layers=hidden_layers, neurons_layer=(hidden_layers, output_layers),\n activations=('logistic', 'logistic'), error=error1, epochs=epochs, threshold=.0001,\n eta=eta1, w=(HLW, OLW), b=[b1, b2], eta_min=eta_min, kmax=max,\n verbose=verbose, weight_list=(-.1, .1))\n gnn.train(x0, yt)\n return gnn\n\ndef handle_example1000(verbose=-1, ):\n HLW = [[.15, .2], # h1\n [.25, .30]] # h2\n b1 = [.35, .35]\n\n OLW = [[.4, .45], # o1\n [.5, .55]] # o2\n b2 = [.6, .6]\n\n # input array\n input_size = 2\n # designate number of neurons in the hidden layer\n hidden_layers = 2\n # designate number of neurons in the put layer layer\n output_layers = 2\n # input array\n x0 = np.array([.05, .1])\n # output array\n yt = np.array([[.01], [.99]])\n eta1 = .5\n eta_min = .05\n epochs = 1000\n kmax = epochs/.3\n error1 = 'bce'\n gnn = NeuralNetwork(input_size, number_layers=hidden_layers, neurons_layer=(hidden_layers, output_layers),\n activations=('linear', 'logistic'), error=error1, epochs=epochs, threshold=.0002,\n eta=eta1, w=(HLW, OLW), b=[b1, b2], eta_min=eta_min, kmax=int(epochs/.3),\n verbose=verbose, weight_list=(-.1, .1))\n gnn.train(x0, yt)\n return gnn\n\ndef handle_and(verbose=-1, ):\n\n HLW = [[.15, .2], # h1\n [.25, .30]] # h2\n b1 = [.35, .35]\n\n OLW = [[.4, .45], # o1\n [.5, .55]] # o2\n b2 = [.6, .6]\n\n # input array\n input_size = 2\n # designate number of neurons in the put layer layer\n output_layers = 1\n # input array\n x0 = np.array(bin_in)\n # output array\n yt = np.array([[0],[0],[0],[1]])\n print('binary inputs {}'.format(x0))\n print('and outputs {}'.format(yt))\n eta1 = 10\n eta_min = .05\n epochs = 300\n kmax = epochs/.3\n error1 = 'bce'\n gnn = NeuralNetwork(input_size, number_layers=1, neurons_layer=(output_layers, ),\n activations=('logistic',), error=error1, epochs=epochs, threshold=.0002,\n #eta=eta1, w=(HLW, OLW), b=[b1, b2], eta_min=eta_min, kmax=int(epochs/.3),\n eta=eta1, w=None, b=[[-1]], eta_min=eta_min, kmax=int(epochs/.3),\n verbose=verbose, weight_list=(-.5, .5, 1, 2, -2, -1))\n gnn.train(x0, yt)\n print(np.around(gnn.predict(x0,yt), 0))\n\n return gnn\n\ndef handle_xor(verbose=-1, ):\n HLW = [[.15, .2], # h1\n [.25, .30]] # h2\n b1 = [.35, .35]\n\n OLW = [[.4, .45], # o1\n [.5, .55]] # o2\n b2 = [.6, .6]\n\n # input array\n input_size = 2\n # designate number of neurons in the put layer layer\n hidden_layers=2\n output_layers = 1\n # input array\n x0 = np.array(bin_in, dtype=np.float64)\n # output array\n yt = np.array([[0], [1], [1], [0]])\n print('binary inputs {}'.format(x0))\n print('and outputs {}'.format(yt))\n eta1 = .4 # .9 no, works .5\n eta_min = .5\n epochs = 1500 # 1500, 1500\n #kmax =int(epochs / .5)\n kmax = 10\n error1 = 'bce'\n gnn = NeuralNetwork(input_size, number_layers=2, neurons_layer=(hidden_layers, output_layers),\n activations=('logistic', 'logistic',), error=error1, epochs=epochs, threshold=.0002,\n # eta=eta1, w=(HLW, OLW), b=[b1, b2], eta_min=eta_min, kmax=int(epochs/.3),\n eta=eta1, w=None, b=[[-1, -1], [-1]], eta_min=eta_min, kmax=kmax,\n verbose=verbose, weight_list=(-.5, .5, 1, 2, -2, -1), update_eta=False)\n gnn.train(x0, yt)\n y_loglike = np.around(gnn.predict(x0, yt), 0)\n yp = [ yll for yll in y_loglike]\n\n print('outputs from network:\\n{}'.format(yp))\n print('accuracy: {}%'.format(accuracy(yt, yp) * 100))\n return gnn\n\ndef 
load_settings(method, verbose=True):\n \"\"\"\n This method is used to process the command line argument and perform the desired task\n :param method: which of the 3 tasks to perform. options are:\n * 'example'\n - run 1 epoch of the class example\n - Note: can be run for 1000 epochs for slide value comparison\n * 'and'\n - create and train a perceptron for the and function\n * 'xor'\n - a) create and attempt to train an perceptron for the xor function\n - b) create and attempt to train an two layer NN with a perceptron output for the xor function\n :param verbose: how much of the training you wish to have displayed on the screen\n Options:\n - verbose:\n = -1 for none but the final error result\n :return:\n \"\"\"\n print('running {}'.format(method))\n if method == 'example':\n gnn = handle_example(verbose)\n elif method == 'example1000':\n gnn = handle_example1000(verbose)\n elif method == 'and':\n gnn = handle_and(verbose)\n elif method == 'xor':\n gnn = handle_xor(verbose)\n return gnn\n\nclass Neuron:\n # source for equations used to code the activation functions:\n # https://towardsdatascience.com/activation-functions-neural-networks-1cbd9f8d91d6\n # source for the equations used to code the error functions:\n # https://ml-cheatsheet.readthedocs.io/en/latest/loss_functions.html\n # https://towardsdatascience.com\n # http://wiki.fast.ai/index.php/Log_Loss\n \"\"\"\n Class object representing a neuron:\n This class represents a neuron in a artificial neural network (ANN).\n duties:\n * process input\n * calculate an produce output\n * calculate error (store in 1x1 array for storage into layer object, same for next two)\n * (if output layer) calculate error methods derivative with x*W + b for gradient decent\n * calculate the backpropagation value, weight/bias update values,\n Has methods/abilities:\n * calculate():\n * takes a 1xn numpy array of values for the input\n * calls in turn\n * process_input():\n performs summation operation on weigted inputs and adds the bias value\n * activation():\n is passed the result of process_input() as the parameter and calculates\n the neurons response\n * produces outputs based on the activation function selected, has several options\n * sigmoid/logistic\n * log likelihood in prob_pred array\n * binary output based on log likelihood\n * linear\n * real number value of summation\n * calculate_loss:\n used to calculate the loss of the neurons predicted outputs\n * update_weights/bias:\n * update the weights (sensitivity to or magnitude of influence of variable)\n * update bias or intercept(linear)/threshold(logistic/sigmoid) value for neuron\n has class variables:\n * activation_func: the activation function used for this neuron\n * eta: learning rate\n * input_size: the number of inputs\n * loss: the loss calculated for this neuron\n * loss_prim: the deriviative loss calculated for this neuron used for GD\n * w: initial weight values if so desired\n * b: initial bias value if so desired\n * ID: index into a layers neurons that id's this neuron\n\n\n Instantiate with: Neuron( input_size, eta=.01, w=None, activation='linear', error='mse', verbose=False)\n * input_size: the number of inputs (int >= 0), pass 0 to test with just 1, for future\n statistical calculations. 
NOTE: this can also be ignored if desired if and only\n if a weights array is the length of number of inputs\n * (optional) eta: learning rate for gradient descent/training (default=.01)\n * (optional) w: numpy weights array, size of array must match input_size (default=None)\n if set to None the weights will be random set to either -.01 or .01\n *(optional) activation: string or integer selecting the activation function for this neuron. The\n options are:\n * 0 or 'sigmoid' for the logistic activation function\n * 1 or 'linear' for the linear activation function\n * 2 or 'relu' for the relu activation function\n * 3 or 'tanH' for the tanH activation function\n * TODO: fix this -- 4 or 'relu' for the activation function\n * 5 or 'softplus' for the softplus activation function\n * 6 or 'arctan' for the arctan activation function\n * 7 or 'perceptron' for the perceptron activation function\n\n * (optional) error: string, the error function for the gradient descent (default 'mse')\n options are:\n * 'mse': mean square error\n * 'bce': binary cross entropy loss\n * 'mae': mean absolute error\n * (optional) verbose: boolean, controls how much is printed to the screen during the process\n (default, False), If False nothing is printed except the end results, otherwise\n various things during the training, process will be printed to standard out\n\n\n \"\"\"\n # #####################################################################################################\n # class variables for testing and such\n # #####################################################################################################\n # a dictionary used to convert from numerical or string based activation function selection\n activation_dictS = {'sigmoid':8, 'linear':1, 'relu':2, 'tanh':3, 'softplus':5, 'arctan':6, 'perceptron':7,\n 'logistic':0,}\n error_dictS = {'se':0, 'crossentropy':1, 'mae':2, 'rmse':3, 'hinge':4, 'huber':5, 'kullback':6, 'mse':2, 'acc':7,}\n ########################################################################################################\n ########################################################################################################\n def __init__(self, input_size, eta=.01, activation_fnc=0, error='se', b=1, w=None, ID=0, verbose=-1,\n pweights=(-.01, .01), eta_min=.001, kmax=100, ):\n # intialize using passed parameters\n self.verbose=verbose\n self.input_size = input_size # number of input signals to process\n self.eta=eta # learning rate, how fast we will attempt to approach the minimum\n self.orig_eta = eta\n self.eta_min = .5\n self.k = 0\n self.kmax = 50\n self.error = self.type_check(error, '', self.Lcase_adj) # error method to use, use type check and Lcase_adj to set to lower case if a string\n self.activation_fnc = self.type_check(activation_fnc, '', self.Lcase_adj) # the type of activation function to use\n self.loss = np.array([0], dtype=np.float) # will store the loss of the neurons predicted output\n # handle the cases when there are no weights given, randomize weight values pulling from pweights list,\n # or check the passed array for error and store it\n self.w = self.process_w_param(w, pweights, input_size, verbose)\n '''\n if w is None:\n pweights = list(pweights)\n pweights.sort()\n #weights = np.linspace(weights[0], weights[1], input_size+int(np.around((.5*input_size), 0)), endpoint=True)\n self.w = np.array(np.random.choice(pweights, input_size, replace=True))\n print('w',self.w)\n else:\n # check the type of the array of the array matches the number of inputs\n 
if type(w) != type(np.array([0])):\n if self.verbose:\n print('converting to numpy array...')\n w = np.array(w)\n if w.shape[0] != self.input_size:\n print('ERROR: size of weights array {} != length w array {}'.format(len(w), self.input_size))\n quit(-155)\n self.w = w\n '''\n self.b = np.array([b], dtype=np.float) # bias or threshold to overcome, treat as a value/weight\n\n self.x=np.zeros(input_size, dtype=np.float) # will store the input to the neuron\n self.y = np.zeros(1, dtype=np.float) # will hold the ground truth for the current input\n # self.b = np.array([-1], dtype=np.float) # bias or threshold to overcome\n\n self.loss_Prime = np.array([0], dtype=np.float) # will store the loss of the neurons predicted output\n self.act_prime = np.array([0], dtype=np.float)\n self.ID = ID # ID is an integer value used to identify it in a layer\n self.pred_prob = np.array([0], dtype=np.float) # will be used to store predicted probability for sigmoid etc.\n self.output = np.array([0], dtype=np.float) # will be used to store output value\n self.del_w = np.zeros(input_size, dtype=np.float) # used to update weight array\n self.del_b = np.zeros(1, dtype=np.float) # used to update bias value\n self.w_updates = list() # will hold past update values\n self.b_updates = list() # will hold past update values\n self.verbose = verbose\n self.z=None # will store the result of the activation function\n # if no initial weights given\n # then set them to random values of either\n # -.1 or .1, with input_size elements\n\n # check for inputs for errors and quit if some are found\n self.process_activation_fnc()\n self.error_check()\n\n def process_w_param(self,w, pweights, input_size, verbose):\n \"\"\"\n will process the weight array input parameter. If it is None the\n weights will be set to random values selected from the pweights (possible weights)\n array. 
Uses the input size to error check the size of given array\n :param w:\n :param pweights:\n :param input_size:\n :param verbose:\n :return:\n \"\"\"\n verbose= self.verbose\n if w is None:\n if self.verbose > 1:\n print('Creating randomized initial weights....')\n pweights = list(pweights)\n #pweights.sort()\n #weights = np.linspace(weights[0], weights[1], input_size+int(np.around((.5*input_size), 0)), endpoint=True)\n self.w = np.array(np.random.choice(pweights, input_size, replace=True))\n if self.verbose > 1:\n print('weights array w set to {}'.format(self.w))\n return self.w\n else:\n # check the type of the array and convert if needed\n if type(w) != type(np.array([0])):\n if verbose > 1:\n print('converting to numpy array...')\n w = np.array(w)\n if verbose > 1:\n print('weights array w set to {}'.format(w))\n if w.shape[0] != self.input_size:\n print('ERROR: size of weights array {} != length w array {}'.format(len(w), self.input_size))\n quit(-155)\n # self.w = w[:]\n return w[:]\n\n def type_check(self, tocheck, vtype, dothis, ):\n if type(tocheck) == type(vtype):\n return dothis(tocheck)\n return tocheck\n\n def Lcase_adj(self, toadj):\n return toadj.lower()\n\n def process_activation_fnc(self, ):\n \"\"\" Will convert a string version of the activation function into a numerical one\n :return: None\n \"\"\"\n if type(self.activation_fnc) == type(''):\n self.activation_fnc = self.activation_dictS[self.activation_fnc]\n\n def error_check(self, ):\n \"\"\" Checks input arguments for errors and stops the program if some are found\n :return: None\n \"\"\"\n err_options = ['mse', 'bce', 'mse2', 'se', 'acc','mae']\n act_options = ['sigmoid', 'linear', 'relu', 'tanH', 'arctan', 'perceptron', 'softmax', 'logistic']\n if self.input_size < 0:\n print('Error: Bad Input size, the input size must be >= 0, got a value of {}'.format(self.input_size))\n quit(-96)\n if self.eta < 0:\n print('Error: Bad learning rate(eta). 
Eta must be greater than zero but recieved {} '.format(self.eta))\n quit(-100)\n if self.activation_fnc not in self.activation_dictS.keys() and self.activation_fnc not in self.activation_dictS.values():\n print('Error: Unknown activation function {}, must be one of:'.format(self.activation_fnc))\n print(act_options)\n quit(-104)\n if self.error not in err_options:\n print('413Error: Unknown error function {}'.format(self.error))\n quit(-107)\n return\n\n def processInput(self, x, w, b):\n \"\"\"\n Performs summation operation for the neuron and adds bias\n :param x: inputs signals to sum\n :param w: weights of the various inputs\n :param b: bias to overcome\n :return: the value of the summation operation on the weighted inputs\n with the biase added (x0*w0 **** + b)\n \"\"\"\n self.sigma =np.dot(x, w) + b\n return self.sigma\n\n def activation(self, x, w, b):\n \"\"\" Performs the activation function calculation selected when neuron\n was instantiated\n {'sigmoid':8, 'linear':1, 'relu':2, 'tanH':3, 'softplus':5, 'arctan':6, 'perceptron':7, 'logistic':0,}\n :param x: input value numpy array from inputing neurons\n :param w: weights on inputs\n :return:\n \"\"\"\n #if self.activation_fnc == 0 or self.activation_fnc == 'sigmoid' or self.activation_fnc == 'logistic':\n if self.activation_fnc in [0, 'logistic']:\n return self.logistic(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [8, 'sigmoid']:\n return self.sigmoid(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [1, 'linear']:\n return self.linear(self.processInput(x, w.transpose(), b))\n elif self.activation_fnc in [2, 'relu']:\n return self.relu(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [3, 'tanH']:\n return self.tanH(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [4, 'relu']:\n return self.relu(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [5, 'softplus']:\n return self.softplus(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [6, 'arctan']:\n return self.garctan(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc in [7, 'perceptron']:\n if self.verbose > 1:\n print('perception:')\n print('activation function {}'.format(self.activation_fnc))\n return self.perceptron(self.processInput(x,w.transpose(),b))\n\n def activation_funcPrime(self, x=None, w=None, b=None):\n \"\"\" Performs the derivative of the activation function calculation selected when neuron\n was instantiated\n :param x: input value numpy array from inputing neurons\n :param w: weights on inputs\n :return:\n \"\"\"\n print('activation function', self.activation_fnc)\n if x is None:\n x = self.x[:]\n if w is None:\n w = self.w[:]\n if b is None:\n # print('bbbbbbb', self.b)\n b = self.b[:]\n\n if self.activation_fnc in [0, 'logistic']:\n return self.logistic_prime()\n elif self.activation_fnc == 1 or self.activation_fnc == 'linear':\n return self.linear_prime()\n elif self.activation_fnc == 2 or self.activation_fnc == 'relu':\n #return self.relu_prim(self.processInput(x, w.transpose(), b))\n return self.relu_prim()\n elif self.activation_fnc == 3 or self.activation_fnc == 'tanH':\n return self.tanH_prime()\n elif self.activation_fnc == 4 or self.activation_fnc == 'relu':\n return self.relu_prim()\n elif self.activation_fnc == 5 or self.activation_fnc == 'softplus':\n return self.softplus_prime()\n elif self.activation_fnc == 6 or self.activation_fnc == 'arctan':\n return self.arctan_prime()\n elif self.activation_fnc == 7 or 
self.activation_fnc == 'perceptron':\n return self.perceptron_prime(self.processInput(x,w.transpose(),b))\n elif self.activation_fnc == 8 or self.activation_fnc == 'sigmoid':\n return self.sigmoid_prime()\n\n def update_eta(self, k):\n self.eta = epsilon(self.orig_eta, self.eta_min, k, self.kmax)\n # called when an input is received\n # calls activation with the given input as x and stores\n # the value of the given input\n def calculate(self, x, w=None, b=None , verbose=True):\n \"\"\"\n Can be called to process an input vector\n calls activation with the given input as x and stores\n the value of the given input\n :param x:\n :param w:\n :param b:\n :return: the result of calling the activation function\n on the given input vector x with the current weight vector and bias\n \"\"\"\n if w is None:\n w = self.w\n if b is None:\n b = self.b\n self.x = x\n val = np.around(self.activation(x, w, b), 6)\n if self.verbose > 1:\n print('stuff w, x, b', self.w, self.x, self.b)\n print('val', val)\n print(self.output)\n return self.output[:]\n\n def calculate_error(self,yt, error=None):\n \"\"\" will calculate the error of the perceptron's\n output based on the error function chosen\n :param yt: ground truth output value\n :param yp: predicted output value from perceptron\n :param error: the error method to use\n :return:\n \"\"\"\n if error is None:\n error = self.error\n\n if error == 'mse':\n self.loss = MSE(yt, self.output)\n print(' ****** error:', self.loss)\n return self.loss\n elif error == 'bce':\n self.loss = BCE(yt, self.output)\n return self.loss\n elif error == 'se':\n self.loss = SE(yt, self.output)\n return self.loss\n elif error == 'mae':\n self.loss = MAE(yt, self.output)\n return self.loss\n elif error == 'acc':\n acc =accuracy(yt, np.around(self.output, 0))\n self.loss = 1 - acc\n print('loss', self.loss)\n print('acc', acc)\n return self.loss\n\n def error_Prime(self, X=None, ytruth=None, ypred=None, error=None, verbose=False):\n \"\"\" Method will calculate the derivative ot the error function\n :param X: input vector\n :param ytruth: ground truth output\n :param ypred: predicted output value\n :param error: type of error/cost function to use\n :return:\n \"\"\"\n if X is None:\n X = self.x[:]\n if ytruth is None:\n ytruth = self.y[:]\n if ypred is None:\n ypred = self.output[:]\n\n ytruth.reshape(ytruth.shape[0], 1)\n ypred.reshape(ytruth.shape[0], 1)\n if error is None:\n error = self.error\n if error == 'mae':\n print('mae')\n msePrime_w = -1 / len(ytruth) * np.dot((ytruth - ypred) / (abs(ytruth - ypred)), X)\n msePrime_b = -1 / len(ytruth) * sum([(yt - yp) / abs(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n self.del_w = msePrime_w\n self.del_b = msePrime_b\n return [msePrime_w, msePrime_b]\n elif error == 'mse':\n print('mse')\n print('pred', ypred.shape)\n print('truth', ytruth.shape)\n #msePrime_w = -2 / len(ytruth) * np.dot((ytruth - ypred), X)\n msePrime_w = -2 / len(ytruth) * sum([(yt - yp)*x for yt, yp, x in zip(ytruth, ypred, X)])\n msePrime_b = -2 / len(ytruth) * sum([(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n self.del_w = msePrime_w\n self.del_b = msePrime_b\n return [msePrime_w, msePrime_b]\n elif error == 'se':\n print('se')\n print()\n #print('pred', ypred.shape)\n #print('pred', ypred)\n #print('truth', ytruth.shape)\n #print('truth', ytruth)\n #msePrime_w = -2 / len(ytruth) * np.dot((ytruth - ypred), X)\n msePrime_w = -1 / len(ytruth) * sum([(yt - yp)*x for yt, yp, x in zip(ytruth, ypred, X)])\n msePrime_b = -1 / len(ytruth) * sum([(yt - yp) for yt, yp, 
in zip(ytruth, ypred)])\n self.del_w = np.array(msePrime_w)\n self.del_b = np.array(msePrime_b)\n return [msePrime_w, msePrime_b]\n elif error == 'bce':\n print('bce')\n msePrime_w = sum([(-yt/(max(1e-15, yp))) + ((1-yt)/max((1-yp), 1e-15)) for yt, yp in zip(ytruth, ypred)])\n msePrime_b = sum([(-yt/(max(1e-15, yp))) + ((1-yt)/(max(1e-15, 1-yp)) ) for yt, yp in zip(ytruth, ypred)])\n self.del_b = msePrime_b\n #self.dels_past.append(self.del_b)\n #self.del_w = msePrime_w\n return [msePrime_w, msePrime_b]\n elif error == 'acc':\n print('accuracy')\n accPrime_w = -(1/len(ytruth))\n accPrime_b = -(1/len(ytruth))\n self.loss = -(1/len(ytruth))\n return self.loss\n else:\n print('ERROR: Unknown error method {}, must be one of:'.format(error))\n print(list(self.error_dictS.keys()))\n quit(-215)\n\n def process_Binary_output1(self, val, thresh=.5):\n self.pred_prob[:] = val # store current predicted val\n if val > thresh:\n return 1\n return 0\n\n def update_weights(self, delta, lr=None):\n if self.verbose > 1:\n print('wdelta', delta)\n if lr is None:\n lr = self.eta\n self.w_updates.append(delta)\n self.w[:] = self.w - lr*delta\n return\n\n def update_bias(self, delta, lr=None):\n if self.verbose > 1:\n print('b delta', delta)\n if lr is None:\n lr = self.eta\n self.b_updates.append(delta)\n self.b[:] = self.b - lr*delta\n return\n\n def backpropagate(self, x,y, yp, error=None):\n # set up the arrays for updateing\n # the weights and biases\n\n delta = self.error_Prime(x,y,yp, error=None)\n pass\n\n def logistic(self, z, verbose=-1):\n self.z = z\n print(' ------------------------------ verbose ', self.verbose)\n if self.verbose > 1:\n print('logistic')\n print('z', z)\n \"\"\"The Logistic function.\"\"\"\n self.output[:] = 1.0 / (1.0 + np.exp(-z))\n return self.output[0]\n def logistic_prime(self):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n # return self.sigmoid(z) * (1 - self.sigmoid(z))\n self.act_prime = self.output * (1 - self.output)\n return self.output * (1 - self.output)\n\n def linear(self, z):\n self.z = z # store summation result\n self.output[:] = z # store output value\n return z\n\n def linear_prime(self):\n return 1\n\n '''\n # loss suggested in class\n def loss_prime(self):\n return -(self.y - self.output)\n def activation_prime(self):\n if self.activation_fnc in [0, 'sigmoid', 'logistic']:\n self.act_prime = self.output*(1 - self.output)\n return self.output*(1 - self.output)\n def calculated_my_del(self,):\n self.my_del = self.activation_prime() * self.loss_prime()\n \n spczz = 0\n '''\n ### Miscellaneous functions\n def sigmoid(self, z, verbose=-1):\n self.z = z\n if self.verbose > 1:\n print('sigmoid')\n \"\"\"The sigmoid function.\"\"\"\n self.output[:] = self.process_Binary_output1(1.0 / (1.0 + np.exp(-z)), )\n return self.output[0]\n def sigmoid_prime(self):\n \"\"\"Derivative of the sigmoid function.\"\"\"\n #return self.sigmoid(z) * (1 - self.sigmoid(z))\n self.act_prime = self.output * (1 - self.output)\n return self.output * (1 - self.output)\n\n def tanH(self, z):\n \"\"\"the Tanh activation function\"\"\"\n self.z = z\n self.output[:] = (2.0 / (1.0 + np.exp(-2 * z))) - 1\n return self.output[0]\n def tanH_prime(self):\n return 1 - (self.output[0] ** 2)\n\n def softplus(self, z):\n self.z = z\n self.output[:] = np.log(1 + np.exp(z))\n return self.output[0]\n def softplus_prime(self,):\n return 1 / (1 + np.exp(-self.z))\n\n def garctan(self, z):\n self.z = z\n self.output[:] = np.arctan(z)\n return self.output[0]\n def arctan_prime(self, ):\n return 1 / 
((self.z ** 2) + 1)\n\n def perceptron(self, z):\n \"\"\"\n perceptron thresholding function, returns 1 iff\n z is non negative, otherwise returns 0\n :param z: input to threshold\n :return:\n \"\"\"\n self.z = z # store summation result\n print()\n if z >= 0:\n self.output[:] = 1\n return 1\n else:\n self.output[:] = 0\n return 0\n def perceptron_prime(self, z=None):\n if z is None:\n self.z = z\n if self.z != 0:\n return 0\n print('strange input to perceptron prime {}'.format(self.z))\n return 0\n\n def relu(self, z):\n self.z = z\n if z < 0:\n self.output[:] = 0\n return 0\n else:\n self.output[:] = z\n return z\n def relu_prim(self, ):\n if self.z < 0:\n return 0\n else:\n return 1\n\nclass FullyConnectedLayer():\n \"\"\"\n Represents a fully connected layer (collection of neurons each connected to the same set of inputs,\n and all outputting into the layer/list in an NN\n should have the same abilities of a neuron only applied to a collection of neurons\n \"\"\"\n def __init__(self, input_size, number_neurons=1, eta=.01, w=None, activation_fnc=0, verbose=-1,\n weight_list=(-.01, .01), error='se', b=None, thresh=.01, ID=0, update_eta=False):\n self.update_eta = update_eta\n self.ID = ID # the id in the network of the layer\n self.input_size=input_size # number of inputs for each neuron\n self.number_neurons=number_neurons # number of neurons in layer\n self.eta=eta # learning rate for layers neurons\n self.w = w # list of weight vectors for each neuron\n if self.w is None: # if no weights given generate random weights for each neuron\n self.w = np.array([np.array(np.random.choice(weight_list, input_size,replace=True))\n for i in range(number_neurons)])\n # print('W is now {}'.format(self.w))\n self.activation_fnc = activation_fnc # the type of activation function for each neuron\n self.verbose=verbose # used for printing to standard out if desired/ debugging\n self.neurons=list() # list of neurons in layer\n self.weights=weight_list # optional list of possible random variables to initialze weights to\n self.bias = [np.array([1.0]) for i in range(number_neurons)] # list of bias values for neurons in layer\n self.bias = np.array(self.bias)\n self.thresh=thresh # threshold for sigmoid or similar\n self.outputs = [np.array([0.0]) for i in range(number_neurons)] # will hold the output values for each neuron\n self.outputs = np.array(self.outputs)\n self.pred_probs_ = [np.array([0.0]) for i in range(number_neurons)] # will hold the predicted probabilites if using sigmoid\n self.pred_probs_ = np.array(self.pred_probs_)\n self.verbose=verbose\n self.handle_bias_array(b) # this line handels creating the list of bias values\n\n # make a list of neurons of the neccesary size\n # either loading the given weights and/or bias values\n # or generating random ones\n print('number of neurons ', number_neurons)\n for i in range(number_neurons):\n # create a new neuron\n # if given None create random weights\n if w is None:\n print('w is none')\n print('bb', b, i)\n self.neurons.append(Neuron(input_size, eta=eta, activation_fnc=activation_fnc, error=error, b = self.bias[i],\n w=np.random.choice(weight_list, input_size), ID=i, verbose=verbose))\n #self.neurons[-1].b = np.array(self.bias[i], dtype=np.float)\n else:\n self.neurons.append(\n Neuron(input_size, eta=eta, activation_fnc=activation_fnc, error=error, w=self.w[i], ID=i,\n verbose=verbose))\n self.neurons[-1].b = np.array([self.bias[i]], dtype=np.float)\n # now store a reference to the new neurons weights and bias arrays\n self.w[i] = 
self.neurons[-1].w[:] # store slice of last created neuron's bias\n #print('bias', self.bias)\n #print('i', i)\n print('neurons {}'.format(self.neurons))\n #self.bias[i] = self.neurons[-1].b[:] # store slice of last created neuron's bias\n self.outputs[i] = self.neurons[-1].output[:]\n self.w = np.array(self.w)\n self.bias = np.array(self.bias)\n\n def handle_bias_array(self, b):\n \"\"\" Generates the list of bias values for the\n neurons in the layer\n :param b: either None (generate all 1 bias values) or an array of\n bias values to use for each neuron\n :return: None\n \"\"\"\n if b is None:\n pass\n else:\n self.bias = b\n\n def calculate(self, X):\n \"\"\"\n Feeds input X into all the neurons in the layer, by calling calculate on them.\n The outputs are stored in the neurons output variable\n :param X:\n :return:\n \"\"\"\n #inpts = X.tolist()\n print()\n print(' ************************************************************ verbose', self.verbose)\n print()\n cnt = 0\n for n in self.neurons:\n n.calculate(X) # feed input x into neuron for processing, output stored by neuron in output\n self.outputs[cnt] = n.output[:][0]\n print('out at c {}, {}'.format(cnt, self.outputs[cnt]))\n if self.verbose > 1:\n print()\n print('----------------------------------')\n print('neuron {}'.format(n.ID))\n print('neuron w {}'.format(n.w))\n print('neuron b {}'.format(n.b))\n print('neuron output {}'.format(n.output))\n print('----------------------------------')\n print()\n cnt += 1\n\n def calculateA(self, X, verbose=False):\n \"\"\" Returns an array of outputs from the activations of each neuron\n :param X:\n :param verbose:\n :return:\n \"\"\"\n yp = list()\n # go through my neurons getting predicted outputs\n # by givinge each neuron the input vector X\n for ni in range(len(self.neurons)):\n # get a prediction from the current neuron and store it\n self.outputs[ni] = self.neurons[ni].calculate(X, self.neurons[ni].w, self.neurons[ni].b, verbose)\n if self.activation_fnc == 0 or self.activation_fnc == 'sigmoid':\n self.pred_probs_[ni] = self.neurons[ni].pred_prob\n # self.outputs[ni] = self.neurons[ni].activation(X, self.neurons[ni].w, self.neurons[ni].b)\n return self.outputs.copy()\n\n def update_weights(self, updates):\n for ni in range(len(self.neurons)):\n self.neurons[ni].update_weights(updates)\n\n def update_bias(self, updates):\n for ni in range(len(self.neurons)):\n self.neurons[ni].update_bias(updates)\n\n def pass_error_prime_to_network(self, network, yt):\n \"\"\"\n If this is the output layer will collect the error function\n derivative (delta i.e. 
activation' * error') values from its\n neurons and return it to it's calling network\n :param network: empty list passed from calling network\n :param yt: ground truth value for output('s)\n :return: list of delta values for the layers neurons\n \"\"\"\n for on, y in zip(self.neurons, yt):\n wu1, delni = on.error_Prime(ytruth=y) # delni = del_b\n network.append(on.del_b)\n return network\n\n def pass_activation_prime_to_network(self, network=None,):\n if network is None:\n network = list()\n \"\"\"\n If this is the output layer will collect the activation function\n derivative values from its neurons and return it to it's calling network\n :param network: empty list passed from calling network\n :return: list of the derivative of the activation function values for the layers neurons\n \"\"\"\n for on in self.neurons:\n on.activation_funcPrime()\n network.append(on.act_prime)\n return network\n\n def pass_output_layer_backpropagation_array(self, dels, verbose=True, k=1):\n bpl = list() # list of back propagation values\n # now update output layer and store backpropagation values to pass back\n for on, dta in zip(self.neurons, dels):\n # update this neurons weights array\n upda = dta * on.x\n if self.verbose > 1:\n print('neuron {}'.format(on.ID))\n print('Input x: {}'.format(on.x)) # input\n print('Initial weights w: {}'.format(on.w[:])) # initial weights\n print('Initial bias {}', on.b[:])\n print('learning rate: {}'.format(on.eta))\n print('delta for w: {}'.format(on.del_w))\n print('delta for b: {}'.format(on.del_b))\n print('dta:', dta)\n print('update: ', upda)\n print(' ******')\n\n # on.w[:] = on.w - on.eta * dta * on.x\n # update weights and bias\n on.w[:] = on.w[:] - (on.eta * upda)\n on.b[:] = on.b[:] - (on.eta * dta)\n on.w_updates.append(upda)\n on.b_updates.append(dta)\n if self.verbose > 1:\n print('Updated weights w: {}'.format(on.w[:])) # initial weights\n print('Updated bias {}', on.b[:])\n print('------------')\n print('')\n # store back propagation values for current neuron\n bpl.append(on.w * dta)\n if self.update_eta:\n on.update_eta(k)\n if self.verbose > 0:\n print(' deltas for out n1 deltas for out n2')\n print('back prop', bpl)\n return bpl\n '''\n # now sum them and pass to network\n back_prop_val1 = sum(bpl)\n print('passing this back through network {}'.format(back_prop_val1))\n '''\n\n def back_propagate(self, val):\n return val\n\n def display_layer(self):\n for n, i in zip(self.neurons, range(self.number_neurons)):\n print('{}) neuron weights and bias: {} {}'.format(n.ID, n.w, n.b))\n print('Layer weights & bias at {}: {} {}'.format(i, self.w[i], self.bias[i]))\n print('--------------------------------------------')\n\nclass NeuralNetwork:\n \"\"\" Represents a neural network. 
Made of several parts\n        * a collection of fully connected layers\n        * each fully connected layer contains some\n          number of neuron objects\n    \"\"\"\n    activation_dictS = {'logistic': 0, 'linear': 1, 'relu': 2, 'tanh': 3, 'softplus': 4, 'arctan': 6, 'perceptron': 7,\n                        'sigmoid':8,}\n    error_dictS = {'se': 0, 'crossentropy': 1, 'mae': 2, 'rmse': 3, 'hinge': 4, 'huber': 5, 'kullback': 6, 'bce':1,\n                   'mse':2, 'acc':7,}\n    or_tbl = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]] # input table to train for an OR function\n    and_tbl = [[0, 0, 0], [0, 1, 0], [1, 0, 0], [1, 1, 1]] # input table to train for an AND function\n    not_tbl = [[0, 1], [1, 0]] # input table for NOT function\n    xor_tbl = [[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]] # input table to train for an XOR function\n\n    def __init__(self, input_size, number_layers=1, neurons_layer=(1,), activations=('logistic',), error='mse',\n                 eta=.01, w=None, b=None, eta_min=.0001, kmax=2, verbose=-1, weight_list=(-.1, .1,), epochs=1,\n                 threshold=.001, update_eta=False):\n        # process parameters and do some error checking\n        self.update_eta=update_eta\n        self.out_re = list()\n        self.num_inputs=int(input_size) # the number of input features\n        if self.num_inputs < 1:\n            print('ERROR: number of inputs must be >= 1, was given {}'.format(self.num_inputs))\n            quit(387)\n\n        self.number_layers=int(number_layers) # number of fully connected layers\n        if self.number_layers < 1:\n            print('ERROR: number of layers must be >= 1, was given {}'.format(self.number_layers))\n            quit(391)\n\n        self.neuron_layer=neurons_layer # list where [# neurons layer 1, ***, # neurons layer N]\n        if len(self.neuron_layer) < 1:\n            print('ERROR: There must be at least 1 entry in the number of neurons per layer array'.format())\n            quit()\n        else:\n            cnt = 0\n            for i in self.neuron_layer:\n                if i < 1:\n                    print('ERROR: number of neurons must be >= 1, was given {}\\nfor layer {}'.format(i, cnt))\n                cnt += 1\n\n        self.activations=activations # the activation function for each layer\n        if len(self.activations) < 1:\n            print('ERROR: There must be at least 1 entry in the activations per layer array'.format())\n            quit(-415)\n        else:\n            for af in self.activations:\n                if af not in self.activation_dictS.keys() and af not in self.activation_dictS.values():\n                    print('ERROR: Unknown activation option {}, options are:'.format(af))\n                    print(self.activation_dictS)\n                    quit(969)\n\n        if (len(activations) - number_layers - len(neurons_layer)) != -len(neurons_layer):\n            print('ERROR: the number of layers, number of neurons per layer, and activation functions must match.')\n            print('Was given lists of sizes {}, {}, and {} for the # of layers, # neurons per layer, and activations\\n'\n                  'arrays.'.format(number_layers, len(neurons_layer), len(activations)))\n            quit(704)\n        self.loss = error # error/loss method for gradient descent\n        if self.loss not in self.error_dictS.keys():\n            print('ERROR: Unknown error method {}'.format(self.loss))\n            quit(-715)\n\n        self.eta = eta # learning rate\n        self.eta_min = eta_min # minimum learning rate if adjusted learning rate desired\n        self.epochs = epochs # number of training epochs to run\n        self.kmax = kmax # used with epsilon method to adjust learning rate\n        self.verbose=verbose # used for debugging\n        self.layers = [] # will hold the connected layer objects\n        self.inputs=None # will be used to hold a set of inputs to train with\n        self.threshold = threshold\n        self.outs = np.zeros(neurons_layer[-1]) # get ready to store the outputs from the network\n        self.losses = list()\n        self.epochL = list()\n        self.best_loss=1e-9\n        
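# illustrative usage sketch (argument values here are hypothetical, chosen only to satisfy the checks above):\n        #   net = NeuralNetwork(input_size=2, number_layers=2, neurons_layer=(2, 1),\n        #                       activations=('logistic', 'logistic'), error='mse', eta=.5, epochs=200)\n        #   net.train(X, Y)  # X: iterable of input vectors, Y: matching ground truth outputs\n        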
self.ypred = list()\n self.w = w\n self.b = b\n # if none are for the weights and or bias values for the neurons in each layer\n # set an array of nones to pass to the layers\n # the the neurons in the layer will randomize the initial weights and or bias values\n if self.w is None:\n\n self.w = list()\n # get a none for each layer so we\n # can get the layers to tell the neurons\n # to randomly initialize\n for i in range(len(neurons_layer)):\n self.w.append(None)\n if self.b is None:\n self.b = list()\n for i in range(len(neurons_layer)):\n self.b.append(None)\n self.X, self.Y = None, None # set up storage space for the input and output array and\n # create need number of layers\n in_size = input_size # this will be used to determine the size of the next layer\n ID = 0\n # go through the number of neurons per layer (neuron_layer), and activation functions for\n # each layer (activations) setting up the layers of the network accordingly\n for ne, af, w, b in zip(self.neuron_layer, self.activations, self.w, self.b):\n self.layers.append(FullyConnectedLayer(in_size, number_neurons=ne, eta=eta, w=w,\n activation_fnc=af, verbose=verbose, update_eta=update_eta,\n weight_list=weight_list, error=self.loss, b=b, ID=ID))\n in_size = ne # grab the # of neurons in the last created layer to know how many inputs for the next\n ID += 1 # each layer is given an interger ID that is it's index in the network layer array\n\n '''\n def fit(self, X, Y, threshold = .01):\n self.X = X\n self.Y = Y\n\n # need to move through samples adjusting wieghts with gradient descent\n # and backpropagation\n # itearte through x making predicionts\n err = 100\n epoch = 0\n while err > threshold and epoch < self.epochs:\n cnt = 0\n # use each sample to generate a set of outputs for each layer\n for sample, response in zip(X,Y):\n # perform forward pass passing successive output/input\n # for each layer\n for lyr in range(len(self.layers)):\n # give current sample to current layer\n yp = self.layers[lyr].calculate(X, )\n if (lyr == len(self.layers) - 1):\n ypred = yp.copy()\n if self.layers[lyr].activation_fnc == 'bce':\n ypred[ypred < self.layers[lyr].thresh] = 0\n ypred[ypred >= self.layers[lyr].thresh] = 1\n self.ypred[lyr] = ypred\n # once you have went through all layers\n # calculate the error and backpropagate\n # store this layers predictions\n # if doing\n def train2(self, X, Y):\n self.fit(X,Y)\n '''\n\n def train(self, X, Y, epochs=None, verbose=True):\n \"\"\"\n trains network for either a set number of epochs or once an error threshold is met\n calls forward_pass() then back_propagate() repeatedly until above conditions met\n :param X: Inputs to network\n :param Y: True response values\n :param epochs: number of training epochs to run\n :param verbose: how verbose the training process is\n :return: None\n \"\"\"\n if epochs is not None:\n self.epochs = epochs\n if self.verbose > -1:\n print(' ---------------- Epochs set to {}'.format(self.epochs))\n err = 100\n epoch = 0\n self.best_loss = 1e9\n best_epoch=None\n err_tot = 0\n step = 0\n while epoch < self.epochs:\n if self.verbose > -1:\n print()\n print('# ####################################################################')\n print('# ####################################################################')\n print('# #################### Epoch {} ###############################'.format(epoch))\n print('# ####################################################################')\n print('# ####################################################################')\n 
print()\n # TODO: perform forward pass putting input through network\n # and calculating the total error\n print('X', X)\n err_tot = 0\n cnt = 1\n for x, y in zip(X,Y):\n print(x)\n err_tot += self.forward_pass([x], [y])\n cnt += 1\n # store values for learning visualizations\n self.losses.append(err_tot)\n self.epochL.append(step)\n # perform backpropagation\n self.back_propagate([y], k=epoch)\n step += 1\n err_tot /= cnt\n if verbose > -1:\n print('Epoch: {}, Total {} error: {}'.format(epoch + 1, self.loss, err_tot))\n # check the error/loss for new best and see if training can end\n if err_tot < self.best_loss:\n self.best_loss = err_tot\n best_epoch = epoch\n\n if verbose > -1:\n print()\n print(' **************************************** new best loss {} at epoch {}'.format(\n self.best_loss, best_epoch))\n print()\n if err_tot <= self.threshold:\n if verbose > -1:\n print('')\n print(' ***************** Error threshold met {}'.format(err_tot))\n print('')\n break\n epoch += 1\n\n\n def predict(self, X, Y):\n yp = list()\n for x, y in zip(X, Y):\n self.forward_pass([x],[y])\n yp.append(self.out_re)\n return yp\n\n def forward_pass(self, X, Y):\n \"\"\"\n Will run through one pass through of the network form input layer to output layer\n and calculate the total error of the output layer and return that\n :param X: input array\n :param Y: ground truth response variabels\n :return: total error of network output\n \"\"\"\n Layer_results = list()\n inputs = X\n old_inputs = None\n for lyr in self.layers:\n # grab the layer and apply the inputs to the layer\n lyr.calculate(inputs)\n if self.verbose > -1:\n print('layer {} outputs: {}'.format(lyr.ID, lyr.outputs))\n inputs = list() # will be used to store the result of last layer to feed into next\n\n # go through neurons of current layer processing input,\n # producing outputs to feed into the next layer if not the output/last layer\n for n in lyr.neurons:\n inputs.append(n.output[0]) # store the last layers output\n if self.verbose > -1:\n print('inputs i ', inputs[-1])\n inputs = np.array(inputs, dtype=np.float)\n old_inputs = inputs.copy()\n self.out_re = old_inputs\n # pass the outputs to the layer\n if self.verbose > -1:\n print('--------------------')\n print('--------------------')\n print('--------------------')\n if self.verbose > -1:\n print('inputs from last layer = {}, pass to network for error calculation and decisions'.format(old_inputs))\n E_total = 0\n # go through last layer calculateing total error\n for n, y in zip(self.layers[-1].neurons, Y):\n n.calculate_error(y) # call the neurons calculate_error method with stores the loss in the neuron's loss variable\n E_total += n.loss # sum the error from all of the output layers neurons\n if self.verbose > -1:\n print('************* Total Error {}'.format(E_total))\n return E_total # return the total error for that run to be tested against the threshold\n\n def back_propagate(self, yt, verbose=True, k=1):\n #r = self.w * previous\n # TODO: now perform Back propagation\n\n # ******************** TODO: handle output\n # Calculate Error derivative:\n # calculate error prime for the last layer for each neuron\n # and pass to network\n net_e_prime = list() #will contain the values TODO: make the actual neuron stuff happen in layer\n net_e_prime = self.layers[-1].pass_error_prime_to_network(list(), yt)\n '''\n for on, y in zip(self.layers[-1].neurons, yt):\n wu1, delni = on.error_Prime(ytruth=y) # delni = del_b\n net_e_prime.append(on.del_b)\n '''\n # calculate activation prime for the 
last layer\n # and pass to network\n net_a_prime = self.layers[-1].pass_activation_prime_to_network(list())\n '''\n for on, y in zip(self.layers[-1].neurons, yt):\n on.activation_funcPrime()\n net_a_prime.append(on.act_prime)\n '''\n\n # now calculate the delta's for\n # the neurons in the last layer\n net_del = list()\n print('eprime', net_e_prime)\n print('aprime', net_a_prime)\n for ap, ep in zip(net_a_prime, net_e_prime):\n net_del.append(ap * ep)\n # convert to a numpy array\n net_del = np.array(net_del, dtype=np.float)\n dels = net_del\n if verbose:\n print('------------------------------------')\n print('------------------------------------')\n print('rd {}'.format(net_del))\n print('------------------------------------')\n print('------------------------------------')\n print('original deltas', dels)\n print()\n bpl = list() # list of back propagation values\n\n # now update output layer and store backpropagation values to pass back\n bpl = self.layers[-1].pass_output_layer_backpropagation_array(dels, verbose=verbose, k=k) # list of back propagation values\n\n '''\n # now update output layer and store backpropagation values to pass back\n for on, dta in zip(self.layers[-1].neurons, dels):\n # update this neurons weights array\n upda = dta * on.x\n if verbose:\n print('Input x: {}'.format(on.x)) # input\n print('Initial weights w: {}'.format(on.w[:])) # initial weights\n print('Initial bias {}', on.b[:])\n print('learning rate: {}'.format(on.eta))\n print('delta for w: {}'.format(on.del_w))\n print('delta for b: {}'.format(on.del_b))\n print('dta:', dta)\n print('update: ', upda)\n print(' ******')\n\n # on.w[:] = on.w - on.eta * dta * on.x\n # update weights and bias\n on.w[:] = on.w[:] - (on.eta * upda)\n on.b[:] = on.b[:] - (on.eta * dta)\n if verbose:\n print('Updated weights w: {}'.format(on.w[:])) # initial weights\n print('Updated bias {}', on.b[:])\n print('------------')\n print('')\n # store back propagation values for current neuron\n bpl.append(on.w * dta)\n if verbose:\n print(' deltas for out n1 deltas for out n2')\n print('back prop', bpl)\n # now sum them and pass to network\n '''\n back_prop_val1 = sum(bpl)\n print('passing this back through network {}'.format(back_prop_val1))\n\n # now pass back iteratively to the previous layers\n # starting at penultimate layer\n # calculate the update values for each neuron\n # calculate the summed weighted sigma to pass back\n # and continue\n next_del = back_prop_val1\n for lyr in range(2, len(self.layers) + 1, 1):\n if verbose > 0:\n print('looking at layer {}'.format(-lyr))\n to_sum = list()\n for hn, bpv in zip(self.layers[-lyr].neurons, next_del):\n # get its activation function\n hn.activation_funcPrime()\n dta = bpv * hn.act_prime[0]\n if verbose > 1:\n print('X {}'.format(hn.x))\n print('dta', dta)\n print('activation', hn.act_prime[0])\n print('bpv', bpv)\n # update the weights and biase\n print('w b4 {}'.format(hn.w))\n print('b b4 {}'.format(hn.b))\n print(' ************************************* ')\n print(' ************************************* ')\n print(' ************************************* ')\n to_sum.append(dta*hn.w)\n print(hn.x)\n print(dta)\n hn.update_weights(dta * np.array(hn.x))\n if self.update_eta:\n hn.update_bias(dta)\n #hn.b[:] = hn.b[:] - hn.eta * dta\n if self.verbose > 1:\n print('w b4 {}'.format(hn.w))\n print('b b4 {}'.format(hn.b))\n print(' -------------------------------------------')\n print(' -------------------------------------------')\n print()\n hn.update_eta(k)\n next_del = 
sum(to_sum)\n\n    def calculate_error(self,yt, yp, error=None):\n        \"\"\" will calculate the error of the perceptron's\n            output based on the error function chosen\n        :param yt: ground truth output value\n        :param yp: predicted output value from perceptron\n        :param error: the error method to use\n        :return:\n        \"\"\"\n        if error is None:\n            error = self.activations[-1]\n        if error.lower() == 'mse':\n            return MSE(yt, yp)\n        elif error.lower() == 'bce':\n            return binary_cross_entropy(yt, yp)\n\n    def error_Prime(self, X, ytruth, ypred, error=None, verbose=False):\n        \"\"\" Method will calculate the derivative of the error function\n        :param X: input vector\n        :param ytruth: ground truth output\n        :param ypred: predicted output value\n        :param error: type of error/cost function to use\n        :return:\n        \"\"\"\n        if error is None:\n            error = self.activations[-1].lower()\n        if error == 'mae':\n            print('mae')\n            maePrime_w = -1 / len(ytruth) * np.dot((ytruth - ypred) / (abs(ytruth - ypred)), X)\n            maePrime_b = -1 / len(ytruth) * sum([(yt - yp) / abs(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n            return [maePrime_w, maePrime_b]\n        elif error == 'mse':\n            print('mse')\n            msePrime_w = -2 / len(ytruth) * np.dot((ytruth - ypred), X)\n            msePrime_b = -2 / len(ytruth) * sum([(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n            return [msePrime_w, msePrime_b]\n        elif error == 'bce':\n            print('bce')\n            msePrime_w = np.dot((ytruth - ypred), X)\n            msePrime_b = sum([(yt - yp) for yt, yp, in zip(ytruth, ypred)])\n            return [msePrime_w, msePrime_b]\n        else:\n            print('ERROR: Unknown error method {}, must be one of:'.format(error))\n            print(list(self.error_dictS.keys()))\n            quit(-215)\n\n    def gradient_descent(self, X, yt, yp, verbose=False):\n        dels = self.error_Prime(X, yt, yp, error=self.loss, verbose=verbose)\n        self.w[:] = self.w - self.eta * dels[0]\n        self.b[:] = self.b - self.eta * dels[1]\n\n    def calculate_loss(self):\n        # calculate loss from last layer\n        # grab the last layer's outputs\n        if self.activations[-1] == 'MSE':\n            err = MSE(self.Y, self.layers[-1].outputs)\n            err_primeW, err_primeb = self.error_Prime(self.X, self.Y, self.layers[-1].outputs)\n            self.layers[-1].update_weights(err_primeW)\n            self.layers[-1].update_bias(err_primeb)\n        # now do the back prop from the second to last layer to the first\n        for i in range(-2, -len(self.layers)+1, -1):\n            pass\n\n# #####################################################\n# #####################################################\n# ######### TODO: Regression Performance ########\n# #####################################################\n# #####################################################\ndef Rvar(ytrue, ypred):\n    ymean = ypred.mean(axis=0)\n    ssreg = SSREG(ytrue, ymean=ymean)\n    ssres = SSRES(ytrue=ytrue, ypred=ypred)\n    return (SSREG(ypred, ymean) / len(ypred)) / (SSTOT(ytrue) / len(ypred))\n\ndef binary_cross_entropy(ytrue, yprob):\n    N = len(ytrue)\n    ytrue = ytrue.reshape(N,1)\n    yprob = yprob.reshape(N,1)\n    return -sum([BCE(yt, yp) for yt,yp in zip(ytrue, yprob)]) / N\n\ndef log_loss(ytrue, yprob):\n    return binary_cross_entropy(ytrue, yprob)\n\ndef BCE(ytrue, yprob):\n    return -sum([yt*np.log(max(yp, 1e-15)) + (1-yt)*np.log(max(1-yp, 1e-15)) for yt, yp in zip(ytrue, yprob)])/len(ytrue)\n\ndef SSE2( ytrue, ypred):\n    sm = sum([.5 * ((yt - yp) ** 2) for yp, yt in zip(ytrue, ypred)])\n    return sm\n\ndef SE(ytrue, ypred):\n    n = len(ytrue)\n    print('n', n)\n    return SSE2(ytrue, ypred) / n\n\ndef SSE( ytrue, ypred):\n    sm = sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n    return sm\n\ndef MSE(ytrue, ypred):\n    n = len(ytrue)\n    
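# mean squared error: SSE(ytrue, ypred) averaged over the n samples, i.e. (1/n) * sum((yt - yp)**2)\n    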
return SSE(ytrue, ypred) / n\n\ndef RMSE(ytrue, ypred):\n return np.sqrt(MSE(ytrue, ypred))\n\ndef MAD(ytrue, ypred):\n n = len(ytrue)\n return sum([abs(yt - yp) for yp, yt in zip(ytrue, ypred)]) / n\n\ndef MAE(ytrue, ypred):\n n = len(ytrue)\n return sum([abs(yt - yp) for yp, yt in zip(ytrue, ypred)]) / n\n\ndef SSREG(ypred, ymean):\n return sum([(yp - ymean) ** 2 for yp in ypred])\n\ndef SSRES(ytrue, ypred):\n return sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n\ndef COD(ytrue, ypred):\n return 1 - (SSRES(ytrue, ypred)/SSTOT(ytrue))\n\ndef SSTOT(ytrue):\n ymean = ytrue.mean(axis=0)\n return sum([(yt - ymean) ** 2 for yt in ytrue]) # scatter total (sum of squares)\n\ndef calculate_log_like(attribs, params):\n #attribs.append('const')\n l = []\n for attrib in attribs:\n l.append(params[attrib])\n return np.exp(l).tolist()\n\ndef correct(ytrue, ypredict):\n # count the predictions that are correct\n return sum(yt == yp for yt, yp in zip(ytrue, ypredict))\n\ndef shape_check(yc, yd):\n if yc.shape != yd.shape:\n yc = yc.reshape(yd.shape[0], yd.shape[1])\n return yc\n\n\ndef accuracy(ytrue, ypredict):\n ypredict = shape_check(ypredict, ytrue)\n return correct(ytrue, ypredict)/len(ytrue)\n\n\n# ##################################################################\n# ##################################################################\n# ################TODO: machine learning tools ####################\n# ##################################################################\n# ##################################################################\ndef epsilon(emax, emin, k, kmax):\n \"\"\" Can be used to modify the learning rate as training occurs\n :param emax: starting learning rate\n :param emin: the final learning rate\n :param k: current step\n :param kmax: controls how many steps it takes to get to emin\n :return: new learning rate\n \"\"\"\n return emax * ((emin/emax)**(min(k, kmax)/kmax))\n\n\n\n# handles the command line arguments if there are any\ndef handle_cmd_line(method='example', verbose=False):\n \"\"\" This method just looks for the first command line argument. 
If none are given\n the program will run all three options for Project 1\n\n :param method:\n :param verbose:\n :return:\n \"\"\"\n if len(sys.argv) == 2:\n return sys.argv[1]\n return method" }, { "alpha_fraction": 0.5163018703460693, "alphanum_fraction": 0.526102602481842, "avg_line_length": 38.44329833984375, "blob_id": "b46c3b20daf2307edeb8b1c185f127be28e208be", "content_id": "5e470652f0fe8ad57df11ca1e76f636c332dbf83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30618, "license_type": "no_license", "max_line_length": 152, "num_lines": 776, "path": "/performance_metrics.py", "repo_name": "gjones1911/DeepSolar_Code_Base", "src_encoding": "UTF-8", "text": "import math\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nimport statsmodels.api as sm\nimport statsmodels.discrete.discrete_model as dis_mod\n\nimport statsmodels.formula.api as smf\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor as VIF\nimport sys\nfrom _products.utility_fnc import *\nfrom _products.visualization_tools import *\nfrom sklearn import metrics\npd.options.mode.use_inf_as_na = True\nviz = Visualizer()\n\n\n# #####################################################\n# #####################################################\n# ######### TODO: Regression Performance ########\n# #####################################################\n# #####################################################\ndef Rvar(ytrue, ypred):\n ymean = ypred.mean(axis=0)\n ssreg = SSREG(ytrue, ymean=ymean)\n ssres = SSRES(ytrue=ytrue, ypred=ypred)\n return (SSREG(ypred, ymean) / len(ypred)) / (SSTOT(ytrue) / len(ypred))\n\ndef SSE( ytrue, ypred):\n return sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n\ndef MSE(ytrue, ypred):\n n = len(ytrue)\n return SSE(ytrue, ypred) / n\n\ndef RMSE(ytrue, ypred):\n return math.sqrt(MSE(ytrue, ypred))\n\ndef MAD(ytrue, ypred):\n n = len(ytrue)\n return sum([abs(yt - yp) for yp, yt in zip(ytrue, ypred)]) / n\n\ndef MAE(ytrue, ypred):\n n = len(ytrue)\n return sum([abs(yt - yp) for yp, yt in zip(ytrue, ypred)]) / n\n\n\ndef SSREG(ypred, ymean):\n return sum([(yp - ymean) ** 2 for yp in ypred])\n\ndef SSRES(ytrue, ypred):\n return sum([(yt - yp) ** 2 for yp, yt in zip(ytrue, ypred)])\n\ndef COD(ytrue, ypred):\n return 1 - (SSRES(ytrue, ypred)/SSTOT(ytrue))\n\ndef SSTOT(ytrue):\n ymean = ytrue.mean(axis=0)\n return sum([(yt - ymean) ** 2 for yt in ytrue]) # scatter total (sum of squares)\n\ndef calculate_log_like(attribs, params):\n #attribs.append('const')\n l = []\n for attrib in attribs:\n l.append(params[attrib])\n return np.exp(l).tolist()\n\ndef calculate_vif (x):\n return pd.Series([VIF(x.values, i)\n for i in range(x.shape[1])],\n index=x.columns)\n\n\n# #############################################################\n# #############################################################\n# ########### TODO: Classification metrics ##############\n# source: https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n# #############################################################\n# #############################################################\ndef class_count(ytrue, val):\n return len(ytrue[ytrue == val])\n\ndef class_prob(ytrue, val):\n return class_count(ytrue, val)/len(ytrue)\n\ndef c_p_handler2(method, param_dict):\n method(param_dict[0], param_dict[1])\n\ndef c_p_handler3(method, param_dict):\n method(param_dict[0], param_dict[1], param_dict[2])\n\ndef c_p_handler4(method, param_dict):\n 
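# dispatch helper: forwards four positional arguments taken from param_dict to `method` (the return value is discarded)\n    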
method(param_dict[0], param_dict[1], param_dict[2], param_dict[3])\n\ndef correct(ytrue, ypredict):\n if not type_check(ytrue, 'numpy'):\n ytrue = np.array(ytrue)\n if not type_check(ypredict, 'numpy'):\n ypredict = np.array(ypredict)\n ytrue = ytrue.reshape(len(ytrue), 1)\n ypredict = ypredict.reshape(len(ypredict), 1)\n # count the predictions that are correct\n return sum(yt == yp for yt, yp in zip(ytrue, ypredict))\n\ndef shape_check(yc, yd):\n if yc.shape != yd.shape:\n yc = yc.reshape(yd.shape[0], yd.shape[1])\n return yc\n\ndef cnt_false(y):\n return sum([e[0] == 0 for e in y])\n\ndef cnt_true(y):\n return sum([e == 1 for e in y])\n\ndef cnt_val(y, val):\n return sum([e == val for e in y])\n\ndef correct_label(ytrue, ypredict, label):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for labeling of label that are correct\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n #return ytrue[ytrue == ypredict and ytrue == label].tolist().count(label)\n return len(ytrue[ytrue == ypredict and ytrue == label])\n\ndef true_positives(ytrue, ypredict, label=1):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for a 1 that are correct\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n tp = sum([yt == yp and yt == 1 for yt, yp in zip(ytrue, ypredict)])\n return tp[0]\n\ndef true_negatives(ytrue, ypredict, label=0):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for label that are correct\n # grab every thing that agrees with the truth\n # and count the number that were miss labeled label\n if type(ytrue) != type(np.array([0])):\n ytrue = np.array(ytrue)\n if type(ypredict) != type(np.array([0])):\n ypredict = np.array(ypredict)\n \"\"\"\n print('--------------------------------------')\n print('after the conversion to numpy arrays')\n print('ytrue, size: {}'.format(len(ytrue)))\n print('ypredict, size: {}'.format(len(ypredict)))\n print('ytrue, shape: {}'.format(ytrue.shape))\n print('ypredict, shape: {}'.format(ypredict.shape))\n print('--------------------------------------')\n print('--------------------------------------')\n print('ypredict')\n print(ypredict)\n print('ytrue')\n print(ytrue)\n print()\n \"\"\"\n ytrue = ytrue.reshape(len(ytrue),1)\n ypredict = ypredict.reshape(len(ypredict),1)\n \"\"\"\n print('--------------------------------------')\n print('after the conversion to numpy arrays')\n print('ytrue, size: {}'.format(len(ytrue)))\n print('ypredict, size: {}'.format(len(ypredict)))\n print('ytrue, shape: {}'.format(ytrue.shape))\n print('ypredict, shape: {}'.format(ypredict.shape))\n print('--------------------------------------')\n print('--------------------------------------')\n print('ypredict')\n print(ypredict)\n print('ytrue')\n print(ytrue)\n print()\n \"\"\"\n quck = sum([yt==yp for yt, yp in zip(ytrue, ypredict)])\n #print('correct total: {}'.format(quck))\n c_z = sum([(yp == yt and yp) == 0 for yp, yt in zip(ypredict, ytrue)])\n #print('correct by iter', c_z)\n #print('label',label)\n #c_z_f = ytrue[ytrue == ypredict and ytrue == label].tolist().count(label)\n #rint('correct by fancy', c_z_f)\n return c_z[0]\n\ndef incorrect(ytrue, ypredict, timed=False):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for a label that are incorrect\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n return sum(yt == yp for yt, yp in 
zip(ytrue, ypredict))[0]\n\ndef incorrect_label(ytrue, ypredict, label, timed=False):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for a label that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number that were incorrectly labeled label\n return sum(yt != yp and yp == label for yt, yp in zip(ytrue, ypredict))[0]\n\ndef false_positives(ytrue, ypredict, label=1):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for a 1 that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number of label that were miss labeled\n return sum(yt != yp and yp == 1 for yt, yp in zip(ytrue, ypredict))[0]\n\ndef false_negatives(ytrue, ypredict, label=0):\n ypredict = shape_check(ypredict, ytrue)\n # count the predictions for a 0 that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number of label that were miss labeled\n return sum(yt != yp and yp == 0 for yt, yp in zip(ytrue, ypredict))[0]\n\ndef false_others(ytrue, ypredict, label):\n ypredict = shape_check(ypredict, ytrue)\n false_others_cnt_dic = {}\n # get the others\n # from predicted list\n other_pred = ypredict[ypredict != label]\n # from true list\n other_true = ytrue[ytrue != label]\n # grab the unique values for the other labels\n others = set(ypredict[ypredict != label].tolist())\n for other in others:\n # how many\n false_others_cnt_dic[other] = len(other_pred[other_pred == label and other_true == other ])\n return false_others_cnt_dic\n\ndef accuracy(ytrue, ypredict):\n ypredict = shape_check(ypredict, ytrue)\n return correct(ytrue, ypredict)/len(ytrue)\n\ndef sensitivity(ytrue, ypredict, label=1):\n ypredict = shape_check(ypredict, ytrue)\n \"\"\" accuracy in predicting positives\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true positive predictions / the total number of oucomes\n \"\"\"\n tp = true_positives(ytrue, ypredict, label=label)\n N = cnt_true(ytrue)\n return tp/max(N, 1e-15)\n\ndef specificity(ytrue, ypredict):\n \"\"\" accuracy in predicting negatives\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n ytrue = np.array(ytrue).reshape(len(ytrue), 1)\n ypredict = np.array(ypredict).reshape(len(ypredict), 1)\n tn = true_negatives(ytrue, ypredict, 0)\n N = cnt_false(ytrue)\n return max(tn, 1e-15) / max(N, 1e-15)\n\n\n\ndef precision(ytrue, ypredict, label=1):\n \"\"\" quality/ability to predict ones correctly (how well it does not call negatives positive\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n tp = true_positives(ytrue, ypredict, label)\n fp = false_positives(ytrue, ypredict, label)\n return tp / max(tp+fp, 1e-15)\n\ndef NPV(ytrue, ypredict):\n \"\"\" negative predictive value (NPV) a measure of how well it does not call positivs negative\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n tn = true_negatives(ytrue, ypredict)\n fn = false_negatives(ytrue, ypredict)\n return tn / max(tn+fn, 1e-15)\n\n\ndef Gconfusion_matrix(ytrue, ypredict):\n \"\"\" Generates a binary confusion matrix\n :param ytrue:\n :param ypredict:\n :return:\n \"\"\"\n tp = true_positives(ytrue, ypredict)\n tn 
= true_negatives(ytrue, ypredict)\n fp = false_positives(ytrue, ypredict)\n fn = false_negatives(ytrue, ypredict)\n cm = [[tn, fp],\n [fn, tp]]\n return cm\n\nclass ClassificationPerformance:\n def class_count(self, ytrue, val):\n return len(ytrue[ytrue == val])\n\n def class_prob(self, ytrue, val):\n return class_count(ytrue, val) / len(ytrue)\n\n def c_p_handler2(self, method, param_dict):\n method(param_dict[0], param_dict[1])\n\n def c_p_handler3(self, method, param_dict):\n method(param_dict[0], param_dict[1], param_dict[2])\n\n def c_p_handler4(self, method, param_dict):\n method(param_dict[0], param_dict[1], param_dict[2], param_dict[3])\n\n def correct(self, ytrue, ypredict):\n # count the predictions that are correct\n return len(ytrue[ytrue == ypredict])\n\n def correct_label(self, ytrue, ypredict, label):\n # count the predictions for labeling of label that are correct\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n # return ytrue[ytrue == ypredict and ytrue == label].tolist().count(label)\n return len(ytrue[ytrue == ypredict and ytrue == label])\n\n def true_positives(self, ytrue, ypredict, label=1):\n # count the predictions for a 1 that are correct\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n return ytrue[ytrue == ypredict].tolist().count(label)\n\n def true_negatives(self, ytrue, ypredict, label=0):\n # count the predictions for label that are correct\n # grab every thing that agrees with the truth\n # and count the number that were miss labeled label\n return ytrue[ytrue == ypredict].tolist().count(label)\n\n def incorrect(self, ytrue, ypredict, timed=False):\n # count the predictions for a label that are incorrect\n # grab every thing that agrees with the truth\n # and count the number that were correctly labeled label\n return len(ytrue[ytrue != ypredict])\n\n def incorrect_label(self, ytrue, ypredict, label, timed=False):\n # count the predictions for a label that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number that were incorrectly labeled label\n return ypredict[ypredict != ytrue].tolist().count(label)\n\n def false_positives(self, ytrue, ypredict, label=1):\n # count the predictions for a 1 that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number of label that were miss labeled\n return ypredict[ypredict != ytrue].tolist().count(label)\n\n def false_negatives(self, ytrue, ypredict, label=0):\n # count the predictions for a 0 that are incorrect\n # grab every thing that disagrees with the truth\n # and count the number of label that were miss labeled\n return ypredict[ypredict != ytrue].tolist().count(label)\n\n def false_others(self, ytrue, ypredict, label):\n false_others_cnt_dic = {}\n # get the others\n # from predicted list\n other_pred = ypredict[ypredict != label]\n # from true list\n other_true = ytrue[ytrue != label]\n # grab the unique values for the other labels\n others = set(ypredict[ypredict != label].tolist())\n for other in others:\n # how many\n p_o =ypredict[ypredict != ytrue and ypredict == other]\n t_o = other_pred[other_true == other]\n false_others_cnt_dic[other] = len()\n return false_others_cnt_dic\n\n def accuracy(self, ytrue, ypredict):\n return correct(ytrue, ypredict) / len(ytrue)\n\n def sensitivity(self, ytrue, ypredict, label=1):\n \"\"\" accuracy in predicting positives\n :param ytrue: the ground truth outcomes\n :param ypredict: the 
predicted outcomes\n :return: #float true positive predictions / the total number of oucomes\n \"\"\"\n tp = true_positives(ytrue, ypredict, label=label)\n N = len(ypredict)\n return tp / max(N, 1e-15)\n\n def specificity(self, ytrue, ypredict, label=0):\n \"\"\" accuracy in predicting negatives\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n tn = true_negatives(ytrue, ypredict, label)\n N = len(ypredict)\n return tn / max(N, 1e-15)\n\n def precision(self, ytrue, ypredict, label=1):\n \"\"\" quality/ability to predict ones correctly (how well it does not call negatives positive\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n tp = true_positives(ytrue, ypredict, label)\n fp = false_positives(ytrue, ypredict, label)\n return tp / max(tp + fp, 1e-15)\n\n def NPV(self, ytrue, ypredict):\n \"\"\" negative predictive value (NPV) a measure of how well it does not call positivs negative\n :param ytrue: the ground truth outcomes\n :param ypredict: the predicted outcomes\n :return: #float true negative predictions / the total number of outcomes\n \"\"\"\n tn = true_negatives(ytrue, ypredict)\n fn = false_negatives(ytrue, ypredict)\n return tn / max(tn + fn, 1e-15)\n\n def __init(self, yt=None, yp=None):\n self.yt = yt\n self.yp = yp\n\n\n\ndef find_significant(x,pvals):\n cnt = -1\n for e in pvals:\n if cnt > -1:\n print(x[cnt], \":\", np.around(e,4))\n cnt += 1\n\n\ndef h_regression(dataset, ysets, xsets):\n\n blocks = list()\n #dataset = fix_dataset(dset[ysets+xsets[0]])\n for y in ysets:\n print('##############################################################################')\n print('\\t\\t\\t\\t\\t\\t',y)\n print('##############################################################################')\n cnt = 0\n for x in xsets:\n blocks += x\n # my method up above to take care of missing or unusable values\n dmodel = fix_dataset(dataset[[y]+blocks])\n Y = dmodel[y]\n print()\n print()\n print('################################################################################')\n print('##################################### Block {:d}'.format(cnt+1))\n print('################################################################################')\n print('\\t\\tX', x)\n print('################################################################################')\n print('################################################################################')\n print('################################################################################')\n print()\n X = dmodel[blocks]\n #print(X['per_capita_income'])\n #X.loc[:, 'per_capita_income'] = (dmodel['per_capita_income'].values - dmodel['per_capita_income'].mean())/dmodel['per_capita_income'].std()\n #print(X['per_capita_income'])\n #X = dataset.loc[:, x]\n X2 = sm.add_constant(X)\n est = sm.OLS(Y, X2)\n est2 = est.fit()\n print(est2.summary())\n cnt += 1\n print()\n print()\n return\n\n\n# performs some for of regression\n# either linear or logistic\ndef analyze_data(ysets, xsets, ytest, xtest, type='LinR', normalize=False):\n #dataset = fix_dataset(dset[ysets+xsets[0]])\n regre_type = ''\n if type == 'LinR':\n regre_type = 'Linear Regression'\n elif type == 'LogR':\n regre_type = 'Logistic Regression'\n else:\n print('Error Unknown regression method {:s}'.format(type))\n quit()\n old_rsqr = 0\n old_fstat = 10e20\n del_rsqr = 
0\n del_fstat = 0\n num_sig = 0\n for y, yt in zip(ysets, ytest):\n print('##############################################################################')\n #print('\\t\\t\\t\\t\\t\\t',y)\n print('##############################################################################')\n cnt = 0\n for x, xt in zip(xsets, xtest):\n\n Y = y\n Yt = yt\n print(len(Y), len(x))\n print()\n print('################################################################################')\n print('##################################### Testing x set {:d}'.format(cnt+1))\n #print('##################################### Using {:s} on dependent variable {:s}'.format(regre_type, y))\n print('################################################################################')\n print('\\t\\tX or dependent variables:\\n', x.columns.values.tolist())\n print('################################################################################')\n print('################################################################################')\n print('################################################################################')\n print()\n X = x\n Xt = xt\n #print('+++++++++++++++++++++++++++++++++++++++++Before: ', X[0,0])\n\n #print('+++++++++++++++++++++++++++++++++++++++++After: ', X.iloc[0,0])\n #print(X['per_capita_income'])\n #X.loc[:, 'per_capita_income'] = (dmodel['per_capita_income'].values - dmodel['per_capita_income'].mean())/dmodel['per_capita_income'].std()\n #print(X['per_capita_income'])\n #X = dataset.loc[:, x]\n X2 = sm.add_constant(X)\n Xt2 = sm.add_constant(Xt)\n if type == 'LinR':\n est = sm.OLS(Y, X2)\n print('\\n\\nThe basic dirs are\\n', dir(est))\n est2 = est.fit()\n print('\\n\\nThe fitted dirs are\\n', dir(est2))\n rsqr = est2.rsquared\n if rsqr > old_rsqr:\n old_rsqr = rsqr\n pvals = est2.pvalues\n fval = est2.fvalue\n ftest = est2.f_test\n print('R-squared:',rsqr)\n print('P-values:\\n', pvals)\n find_significant(x, pvals)\n print('Fvalue\\n',fval)\n print(est2.summary())\n print('\\n\\nThe summary dirs are:\\n',dir(est2.summary()))\n vif = calculate_vif(X2)\n print('VIF:\\n', vif)\n elif type == 'LogR':\n #clf = LogisticRegression(solver='lbfgs',max_iter=1000).fit(X2, Y)\n #params = clf.coef_\n #log_like = np.log(np.abs(params))\n #print(params)\n #print(log_like)\n #print('the y and x')\n #print(Y.values, X2.values)\n n = len(X2)\n print('n',n)\n model = dis_mod.Logit(Y.values, X2)\n model2 = model.fit()\n loglikly= calculate_log_like(x, model2.params)\n print(dir(model))\n print(model.df_model)\n print(model2.summary())\n llfv = model2.llf\n llnullv = model2.llnull\n print('llf: ', llfv)\n print('llf: ', llnullv)\n print('McFadden’s pseudo-R-squared: ', 1 - (llfv/llnullv)) # https://statisticalhorizons.com/r2logistic\n cxsn = G_Cox_Snell_R2(llnullv, llfv, n)\n print('Cox\\'s Snell: {}'.format(cxsn) )\n print('model 2',dir(model2))\n print('R squared:', model2.prsquared) # McFadden’s pseudo-R-squared.\n print(dir(model2.summary().tables))\n print('The log likelyhoods are:')\n show_labeled_list(loglikly, x)\n print('pvalue for {:s}: {:f}'.format(X2.columns.values.tolist()[0], model2.pvalues.loc[x.columns.values.tolist()[0]]))\n y_pred = model2.predict(Xt2, linear=True)\n #print(y_pred)\n yp = list()\n for e in y_pred:\n if e > 0:\n yp.append(1)\n else:\n yp.append(0)\n #print(model.loglikeobs(x))\n #df_confusion = pd.crosstab(Y, y_pred, rownames=['Actual'], colnames=['Predicted'], margins=True)\n viz.plot_confusion_matrix(Yt, yp, classes=['NA', 'A'],\n title='Confusion matrix, without normalization')\n 
#plot_confusion_matrix(df_confusion)\n #vif = pd.Series([VIF(X2.values, i)\n # for i in range(X2.shape[1])],\n # index=X2.columns)\n vif = calculate_vif(X2)\n print('VIF:\\n',vif)\n plt.show()\n cnt += 1\n print()\n print()\n return\n\ndef G_Cox_Snell_R2(llnull, llmodel, n):\n v = 2/n\n print('v',v)\n va = np.exp(llnull)\n vb = np.exp(llmodel)\n print('va, vb', va, vb)\n return 1 - (va/vb)**v\n\n# #####################################################\n# #####################################################\n# ######### TODO: Regression Performance ########\n# #####################################################\n# #####################################################\ndef SM_Logit(Training, Testing, verbose=False):\n X = Training[0]\n Y = Training[1]\n print(X)\n Xt = Testing[0]\n Yt = Testing[1]\n\n # add the constant to the model\n X2 = sm.add_constant(X)\n Xt2 = sm.add_constant(Xt)\n\n # grab the size of the data\n n = len(X2)\n if verbose:\n print('n', n)\n # create and fit the model\n model = dis_mod.Logit(Y.values, X2)\n model2 = model.fit()\n # calculate the loglikely hood\n loglikly = calculate_log_like(X, model2.params)\n\n if verbose:\n print(dir(model))\n print(model.df_model)\n print(model2.summary())\n # grab the log likely hood for the model and just the intercept for later calculations\n llfv = model2.llf\n llnullv = model2.llnull\n print('llf: ', llfv)\n print('llf: ', llnullv)\n print('McFadden’s pseudo-R-squared: ', 1 - (llfv / llnullv)) # https://statisticalhorizons.com/r2logistic\n cxsn = G_Cox_Snell_R2(llnullv, llfv, n)\n print('Cox\\'s Snell: {}'.format(cxsn))\n print('model 2', dir(model2))\n print('R squared:', model2.prsquared) # McFadden’s pseudo-R-squared.\n # print(dir(model2.summary().tables))\n print('The log likelyhoods are:')\n show_labeled_list(loglikly, X)\n print('pvalue for {:s}: {:f}'.format(X2.columns.values.tolist()[0], model2.pvalues.loc[X.columns.values.tolist()[0]]))\n y_pred = model2.predict(Xt2, linear=True)\n # print(y_pred)\n yp = list()\n predicted_prob = list()\n for e in y_pred:\n # print('e: {}, ln(e): {}, e^(e): {}'.format(e, np.log(e), np.exp(e)))\n predicted_prob.append(np.exp(e))\n if e > 0:\n yp.append(1)\n else:\n yp.append(0)\n # print(model.loglikeobs(x))\n # df_confusion = pd.crosstab(Y, y_pred, rownames=['Actual'], colnames=['Predicted'], margins=True)\n rd = viz.plot_confusion_matrix(Yt, yp, classes=['NA', 'A'],\n title='Confusion matrix, without normalization')\n vif = calculate_vif(X2)\n print('VIF:\\n',vif)\n plt.show()\n\n rdict = {'Accuracy':rd['Accuracy'], 'Sensitivity':rd['Sensitivity'],\n 'Precision':rd['Precision'], 'Specificity':rd['Specificity'], 'MacFadden_Rsquare':[model2.prsquared]}\n return rdict\n\nclass SM_Logit_model():\n def __init__(self):\n self.X=None\n self.y=None\n self.Xt=None\n self.yt=None\n self.model1=None\n self.fitted_model=None\n self.predicted_prob = list()\n\n def fit(self, X, Y):\n self.X=sm.add_constant(X)\n self.Y=Y\n self.model = dis_mod.Logit(self.Y.values, self.X)\n try:\n self.fitted_model = self.model.fit()\n return\n except np.linalg.LinAlgError:\n print('uh oh !!! 
some linear algebra broke ignore this set and move on')\n return -1\n\n def predict(self, X):\n print('X')\n X2 = sm.add_constant(X)\n print(X2)\n y_pred = self.fitted_model.predict(X2, linear=True)\n yp = list()\n for e in y_pred:\n # print('e: {}, ln(e): {}, e^(e): {}'.format(e, np.log(e), np.exp(e)))\n self.predicted_prob.append(np.exp(e))\n if e > 0:\n yp.append(1)\n else:\n yp.append(0)\n return yp\n\n def score(self, X, Y, metric='Accuracy'):\n yp = self.predict(X)\n cm = confusion_matrix(Y, yp)\n rd = process_cm(cm)\n return rd[metric]\n\n def get_Macfadden(self):\n return self.fitted_model.prsquared\n\n\n\n\ndef clustering_performance(clstr_clf, X, y, X2, y2, verbose=False, comp_kn=None):\n if comp_kn is None:\n clstr_clf.fit(X,y)\n yp = clstr_clf.predict(X)\n else:\n yp = clstr_clf.fit_predict(X)\n hmo1 = metrics.homogeneity_score(y, yp)\n acc1 = metrics.accuracy_score(y, yp)\n sens1 = metrics.recall_score(y, yp)\n spec1 = metrics.precision_score(y, yp)\n if verbose:\n print('-----------------------------------')\n print('-----------------------------------')\n print('----------- Training Set -----------')\n print('Homogeniety: {:.3f}'.format(hmo1))\n print('Accuracy: {:.3f}'.format(acc1))\n print('Recall: {:.3f}'.format(sens1))\n print('Precision: {:.3f}'.format(spec1))\n yp2 = clstr_clf.predict(X2)\n hmo2 = metrics.homogeneity_score(y2, yp2)\n acc2 = metrics.accuracy_score(y2, yp2)\n sens2 = metrics.recall_score(y2, yp2)\n spec2 = metrics.precision_score(y2, yp2)\n if verbose:\n print('-----------------------------------')\n print('----------- Testing Set -----------')\n print('Homogeniety: {:.3f}'.format(hmo2))\n print('Accuracy: {:.3f}'.format(acc2))\n print('Recall: {:.3f}'.format(sens2))\n print('Precision: {:.3f}'.format(spec2))\n print('-----------------------------------')\n print('-----------------------------------')\n\n train_res = {'Homogeniety':hmo1, 'Accuracy':acc1, 'Recall':sens1, 'Precision':spec1}\n test_res = {'Homogeniety': hmo2, 'Accuracy': acc2, 'Recall': sens2, 'Precision': spec2}\n return train_res, test_res\n\n\ndef process_cm(cm, verbose=False):\n specificity = cm[0][0] / (cm[0][0] + cm[0][1])\n sensitivity = cm[1][1] / (cm[1][0] + cm[1][1])\n overall_acc = (cm[1][1] + cm[0][0]) / (cm[1][0] + cm[1][1] + cm[0][0] + cm[0][1])\n precision = (cm[0][0] / (cm[0][0] + cm[1][0]))\n print('Accuracy: {:.3f}'.format(overall_acc))\n print('Recall: {:.3f}'.format(sensitivity))\n print('Specificity: {:.3f}'.format(specificity))\n print('Precision: {:.3f}'.format(precision))\n title = 'Accuracy: {:.3f}\\nrecall: {:.3f}\\nprecision: {:.3f}\\nspecificity: {:.3f}'.format(overall_acc,\n sensitivity,\n precision,\n specificity)\n return dict({'Accuracy': overall_acc, 'Sensitivity': sensitivity,'Precision': precision, 'Specificity': specificity, 'CM': cm})\n\n\n\n\n# ######################################################################################\n# ######################################################################################\n# ################## TODO: Timing tools ############################\n# ######################################################################################\n# ######################################################################################\n\ndef method_timer(method, param_dict):\n st = time.time()\n method(param_dict)\n exe_time = time.time() - st\n return exe_time\n\n\n" } ]
11
kirand1303/test
https://github.com/kirand1303/test
2983cd061bf02455658684541bff5e16d549e438
e786fa78b17d8a48c2998844fc489dc5b3c52c63
716ac927b55977b6b256f7ec17361d2b845c0fdf
refs/heads/master
2020-03-12T02:56:40.974997
2018-04-20T21:53:04
2018-04-20T21:53:04
130,414,650
0
0
null
2018-04-20T21:16:23
2018-04-20T21:16:25
2018-04-20T21:25:38
null
[ { "alpha_fraction": 0.47594502568244934, "alphanum_fraction": 0.5171821117401123, "avg_line_length": 21.200000762939453, "blob_id": "80b65d5ae439b0163f4b6711bfc96f23de2090b7", "content_id": "1680641525bf16bab3516adc572208321db1dddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 582, "license_type": "no_license", "max_line_length": 53, "num_lines": 25, "path": "/DataGenerator.py", "repo_name": "kirand1303/test", "src_encoding": "UTF-8", "text": "import numpy as np\r\nimport matplotlib.pylab as plt\r\n\r\nclass DataGenerator:\r\n\r\n def linear_fn_increase( self ):\r\n x = np.arange(100)\r\n delta = np.random.uniform(-10,10,size=(100,))\r\n y = .4 * x + 3 + delta\r\n print(y)\r\n plt.plot(x,y)\r\n plt.show()\r\n\r\n def linear_fn_decrease( self ):\r\n x = np.arange(100)\r\n delta = np.random.uniform(-10,10,size=(100,))\r\n y = .4 * x - 3 + delta\r\n print(y)\r\n plt.plot(x,y)\r\n plt.show()\r\n\r\n\r\nd = DataGenerator()\r\n#d.linear_fn_increase()\r\nd.linear_fn_decrease()\r\n\r\n" }, { "alpha_fraction": 0.8187134265899658, "alphanum_fraction": 0.8187134265899658, "avg_line_length": 56, "blob_id": "1499e92cf19cc0dc3f5483be87e7a2f76e9a9f39", "content_id": "e8b87c252cebba7b71075137ef116fa02f742215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 171, "license_type": "no_license", "max_line_length": 107, "num_lines": 3, "path": "/README.md", "repo_name": "kirand1303/test", "src_encoding": "UTF-8", "text": "# test\nTest project to understand containers and microservices\nBuild a container that has code to generate data, deploy in cloud and run the service inside the container.\n" } ]
2
FrenchGithubUser/Email-Sender
https://github.com/FrenchGithubUser/Email-Sender
90b309a7b8e77d0be599ad127ec71144aa9654a2
5efce938df0fe9678e305f7d7ac5465b53c75fe1
a7fb44af07ff8927b60e8d9b28845f65c5d2e02d
refs/heads/main
2023-03-06T14:53:12.836498
2021-02-16T13:47:00
2021-02-16T13:47:00
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7906976938247681, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 106.5, "blob_id": "d1ddc60981ea3093529a77da229d1ae6bd7ba447", "content_id": "cf3c4656dad31bca9095b4bc7c1f3a0f5cff3f6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 215, "license_type": "no_license", "max_line_length": 184, "num_lines": 2, "path": "/README.md", "repo_name": "FrenchGithubUser/Email-Sender", "src_encoding": "UTF-8", "text": "Send emails to an email list.\nWarning, don't use this script to spam anyone, you could also have troubles with your email services provider if the emails list is too big or your emails could end up in spam folders.\n" }, { "alpha_fraction": 0.6460176706314087, "alphanum_fraction": 0.6548672318458557, "avg_line_length": 21.196428298950195, "blob_id": "b805f244b56680300ff9f78c907ae48f9fb68d16", "content_id": "1cd8d87ba1889478f61a1ad0c4249c7bc54fb934", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1243, "license_type": "no_license", "max_line_length": 60, "num_lines": 56, "path": "/email_sender.py", "repo_name": "FrenchGithubUser/Email-Sender", "src_encoding": "UTF-8", "text": "from email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nimport smtplib\nimport time\n\n\n# create message object instance\nmsg = MIMEMultipart()\n\ndef get_message(message_file):\n with open(message_file) as texte:\n message = texte.read()\n return message\n\ndef get_emails(emails_file):\n email_list = []\n file1 = open(emails_file, \"r\")\n while(True):\n \tline = file1.readline()\n \tif not line:\n \t\tbreak\n \temail_list.append(line.strip())\n file1.close()\n return email_list\n\nmessage = get_message('message.txt')\nemails = get_emails('emails.txt')\n\n# setup the parameters of the message\npassword = \"password\"\nmsg['From'] = \"[email protected]\"\n\nmsg['Subject'] = \"subject\"\n\n# add in the message body\nmsg.attach(MIMEText(message))\n\nserver = smtplib.SMTP('smtp-mail.outlook.com: 587')\nserver.starttls()\nserver.login(msg['From'], password)\n\nnumber = 0\n\nfor email in emails:\n try:\n server.sendmail(msg['From'], email, msg.as_string())\n print(f'Email sent to {email}')\n time.sleep(1)\n except:\n time.sleep(60)\n server.sendmail(msg['From'], email, msg.as_string())\n print(f'Email sent to {email}')\n number += 1\nserver.quit()\n\nprint(f'{number} emails sent !')\n" } ]
2
sergmalinov1/command_centr
https://github.com/sergmalinov1/command_centr
474666cb94675bc6e2ee01d259a2c7bb4bb1ba58
b1d4a3d90d764e0b6b20cd54b12259f7de272c27
ab6daf8ef15391496447ff35d55d9e8d51772993
refs/heads/master
2020-04-01T23:55:14.217661
2018-12-12T12:06:00
2018-12-12T12:06:00
153,779,352
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5804020166397095, "alphanum_fraction": 0.6218593120574951, "avg_line_length": 29.84000015258789, "blob_id": "753a0b98e678aff7910e4ee00ffa4d631222060c", "content_id": "5c1958e72932a54ef48af087cbeed2d2ccdd9d9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 128, "num_lines": 25, "path": "/structure/migrations/0009_auto_20181129_1132.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-29 09:32\r\n\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0008_auto_20181128_1648'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='country',\r\n name='account_id',\r\n field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='structure.Customer_Account'),\r\n ),\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='customer',\r\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5627118349075317, "alphanum_fraction": 0.6033898591995239, "avg_line_length": 26.095237731933594, "blob_id": "10754441da8c7958430b626a8c5af762a0608819", "content_id": "03fcad54cee3a8d6b41870305d2c4181b8677dc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 590, "license_type": "no_license", "max_line_length": 116, "num_lines": 21, "path": "/structure/migrations/0005_customer_account_customer.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-22 14:58\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('customer', '0001_initial'),\r\n ('structure', '0004_customer_account'),\r\n ]\r\n\r\n operations = [\r\n migrations.AddField(\r\n model_name='customer_account',\r\n name='customer',\r\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='customer.Customer'),\r\n preserve_default=False,\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5378031134605408, "alphanum_fraction": 0.5848787426948547, "avg_line_length": 29.863636016845703, "blob_id": "23feb84b135762daf5be0a9b0ea68fdbc2497aec", "content_id": "9797585834133edc3a6a65ad559dcfa6b94037c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 701, "license_type": "no_license", "max_line_length": 120, "num_lines": 22, "path": "/structure/migrations/0004_customer_account.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-21 10:14\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0003_auto_20181121_1210'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='Customer_Account',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=50)),\r\n ('world', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to='structure.World_version')),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.45652174949645996, "alphanum_fraction": 0.6594203114509583, "avg_line_length": 15.25, "blob_id": "e02b3d5ecd1c6a0366c3db44a0ec983a8b895c38", "content_id": "7cce8a9784198b60305c982add7b142eafa298b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 138, "license_type": "no_license", "max_line_length": 22, "num_lines": 8, "path": "/requirements.txt", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "dj-database-url==0.5.0\r\nDjango==2.1.2\r\ngunicorn==19.9.0\r\nJinja2==2.10\r\nMarkupSafe==1.0\r\npsycopg2==2.7.5\r\npytz==2018.5\r\nwhitenoise==3.2.1\r\n" }, { "alpha_fraction": 0.4106951951980591, "alphanum_fraction": 0.4128342270851135, "avg_line_length": 42.46511459350586, "blob_id": "94d5d5ac4ac54cd4f25dfb5fbee1aba9098ee4da", "content_id": "0814bf6467d08d6bfdd18220b7bcab229249d61a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 1926, "license_type": "no_license", "max_line_length": 109, "num_lines": 43, "path": "/customer/templates/include/create_country_modal.html", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "<div id=\"createCountryModal\" class=\"modal\" tabindex=\"-1\" role=\"dialog\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <h5 class=\"modal-title\">Создать страну</h5>\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\">\n <span aria-hidden=\"true\">&times;</span>\n </button>\n </div>\n\n <form action=\"/country/create/\" method=\"post\">\n <div class=\"modal-body\">\n {% csrf_token %}\n {{ country_form.as_p }}\n\n <div class=\"form-group\">\n <label for=\"exampleFormControlInput1\">Название клана:</label>\n <input class=\"form-control\" name=\"clan_name\" >\n </div>\n\n <div class=\"input-group\" >\n <div class=\"form-group\">\n <label>Выберите аккаунт:</label>\n <select class=\"custom-select\" name=\"account_number\">\n {% for item in world_list %}\n {% if item.country == \"-\" %}\n <option value={{ item.account_id }} >{{ item.account_name }}</option>\n {% endif %}\n {% endfor %}\n </select>\n </div>\n </div>\n </div>\n\n <div class=\"modal-footer\">\n <button type=\"submit\" class=\"btn btn-primary\">Создать</button>\n <button type=\"button\" class=\"btn btn-secondary\" data-dismiss=\"modal\">Отменить</button>\n </div>\n </form>\n\n </div>\n </div>\n</div>\n\n" }, { "alpha_fraction": 0.659710168838501, "alphanum_fraction": 0.670724630355835, "avg_line_length": 33.89583206176758, "blob_id": "924bb594d4a7f029e96e7c03336e3baffc8289ea", "content_id": "d7c5e42870530226e89bbf726165fc6473bc6085", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1763, "license_type": "no_license", "max_line_length": 90, "num_lines": 48, "path": "/structure/models.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.db import models\r\n#from customer.models import Customer\r\nfrom django.contrib.auth.forms import User\r\n\r\n# Create your models here.\r\nclass World_version(models.Model):\r\n STATUS_OF_WORLD = (\r\n ('enable', '_enable'),\r\n ('disable', '_disable'),\r\n ('comingsoon', '_comingsoon'),\r\n )\r\n\r\n name = models.CharField(max_length=50)\r\n status_of_world = models.CharField(max_length=10, choices=STATUS_OF_WORLD)\r\n\r\n def 
__str__(self):\r\n return 'Название - {0}'.format(self.name)\r\n\r\n\r\n\r\nclass Country(models.Model):\r\n country_name = models.CharField(max_length=50)\r\n world = models.ForeignKey(World_version, on_delete=models.CASCADE)\r\n customer = models.ForeignKey(User, on_delete=models.CASCADE, default=1)\r\n\r\n def __str__(self):\r\n return 'Название - {0}'.format(self.country_name)\r\n\r\nclass Clan(models.Model):\r\n clan_name = models.CharField(max_length=50)\r\n country = models.OneToOneField(Country, on_delete=models.CASCADE)\r\n\r\n def __str__(self):\r\n return 'Название - {0}'.format(self.clan_name)\r\n\r\nclass Customer_Account(models.Model):\r\n account_name = models.CharField(max_length=50)\r\n world = models.ForeignKey(World_version, on_delete=models.CASCADE)\r\n customer = models.ForeignKey(User, on_delete=models.CASCADE, default=1)\r\n clan = models.ForeignKey(Clan, on_delete=models.SET_NULL,null=True, blank=True)\r\n\r\n def __str__(self):\r\n return 'Название - {0}, Статус {1}'.format(self.account_name, self.world.name)\r\n\r\n\r\nclass User_settings(models.Model):\r\n customer = models.ForeignKey(User, on_delete=models.CASCADE, default=1)\r\n selected_world = models.ForeignKey(World_version, on_delete=models.CASCADE, default=4)\r\n\r\n" }, { "alpha_fraction": 0.5818713307380676, "alphanum_fraction": 0.6130604147911072, "avg_line_length": 32.20000076293945, "blob_id": "3069c03f9c6a3cb48ee0709b27b5d0ec77f279be", "content_id": "2ac764ad696cb46dae0009265ad6fdd7407d0113", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1026, "license_type": "no_license", "max_line_length": 125, "num_lines": 30, "path": "/structure/migrations/0013_auto_20181129_1639.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-29 14:39\r\n\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0012_auto_20181129_1637'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='clan',\r\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.Clan'),\r\n ),\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='customer',\r\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\r\n ),\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='world',\r\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.World_version'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5518590807914734, "alphanum_fraction": 0.6125244498252869, "avg_line_length": 24.894737243652344, "blob_id": "7fa8137449095de33fbe9ee3caa8ee19c3d68cd9", "content_id": "2ac016b0caaf10262cbe93906b612d3fb280dccc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 113, "num_lines": 19, "path": "/structure/migrations/0011_auto_20181129_1415.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-29 12:15\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', 
'0010_auto_20181129_1218'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='clan',\r\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.Clan'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5355450510978699, "alphanum_fraction": 0.5845181941986084, "avg_line_length": 25.521739959716797, "blob_id": "b8612fdc84adf7c0af3810bd968103a4f5c9a6f7", "content_id": "2f121b924a49d5c4a188ff55c933bf8f73fafa8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 633, "license_type": "no_license", "max_line_length": 125, "num_lines": 23, "path": "/structure/migrations/0010_auto_20181129_1218.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-29 10:18\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0009_auto_20181129_1132'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='country',\r\n name='account_id',\r\n ),\r\n migrations.AddField(\r\n model_name='customer_account',\r\n name='clan',\r\n field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.Clan'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5912547707557678, "alphanum_fraction": 0.5950570106506348, "avg_line_length": 34, "blob_id": "527ba99eb2c4a01365538e9db7a21f3a08df8f9f", "content_id": "51d839751a1bbd7033b9c5c9cb97868b53bc6282", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 526, "license_type": "no_license", "max_line_length": 65, "num_lines": 15, "path": "/customer/static/customer/js/account_modal.js", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "$(document).on(\"click\", \".open-modal\", function () {\n\n// var myBookId = $(this).data('id');\n\n // var formAction = $(this).attr(\"action\");\n // var action = document.getElementsByTagName(\"form\").action;\n alert(formAction);\n // action = action + \"/12\";\n // document.getElementsByTagName(\"form\").action = action ;\n\n // $(\".modal-body #bookId\").val( myBookId );\n // As pointed out in comments,\n // it is superfluous to have to manually call the modal.\n // $('#addBookDialog').modal('show');\n});\n\n" }, { "alpha_fraction": 0.519731342792511, "alphanum_fraction": 0.5345647931098938, "avg_line_length": 21.81999969482422, "blob_id": "bdb95c0037f772c2bf0bfa832470591b96276871", "content_id": "cc65b2cd6647ec36854b4964e10ba66104b58635", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3591, "license_type": "no_license", "max_line_length": 102, "num_lines": 150, "path": "/global_maps/static/globalMaps/js/first.js", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "var k = 0.5;\r\nvar hexW = 108*k;\r\nvar hexH = 123*k;\r\nvar countO = 100;\r\n\r\n\r\n\r\n\r\nvar imgs = [\r\n 'desert_plain.png',\r\n 'desrt_hill.png',\r\n 'grass_fertility.png',\r\n 'grass_forest.png',\r\n 'grass_hill.png',\r\n 'grass_hunt.png',\r\n 'grass_plain.png',\r\n 'grass_swamp.png',\r\n 'snow_fertility.png',\r\n 'snow_forest.png',\r\n 'snow_hill.png',\r\n 'snow_hunt.png',\r\n 'snow_mountain.png',\r\n 'snow_plain.png',\r\n]\r\n\r\n\r\n\r\n\r\nvar select_img = imgs[0];\r\n\r\n/*\r\nvar sidebar = 
document.getElementsByClassName(\"sidenav\")[0];\r\nfor(var i=0; i<imgs.length; i++){\r\n\tvar el = document.createElement('a');\r\n\tel.innerHTML=\"<img src='img/\" + imgs[i]+ \"', width='40', height='40'>\";\r\n\tel.onclick = function(i) {\r\n return function()\r\n\t\t\t{\r\n\t\t\t\tselect_img = imgs[i];\r\n\t\t\t}\r\n }(i);\r\n\tsidebar.appendChild(el);\r\n}*/\r\n\r\n\r\n\r\n\r\nfunction Hex(i,j) {\r\n var _img = imgs[ getRandomInt(0,imgs.length) ];\r\n var _y=i;\r\n var _x=j;\r\n var _class=\"hex \"+(_y%2==0?\"even\":\"odd\");\r\n var _html = '<span>'+_x+\"-\"+_y+'</span>';\r\n var _w = hexW;\r\n var _h = hexH;\r\n var _top = _h * 0.75 * _y ;\r\n var _left = _w * _x + (_y%2==0? 0 : _w * 0.5);\r\n var _data = {\r\n img:_img,\r\n x:_x,\r\n y:_y\r\n }\r\n\r\n\r\n return {\r\n rendered: false,\r\n getElement : function () {\r\n var el = document.createElement('div');\r\n //el.innerHTML = _html;\r\n el.className = _class;\r\n\r\n el.style.backgroundImage = 'url(static/global_maps/img/'+_img+')'; //ГОВНОКОД - ПЕРЕДЕЛАТЬ\r\n el.style.width = _w+'px';\r\n el.style.height = _h+'px';\r\n el.style.top = _top+'px';\r\n el.style.left = _left+'px';\r\n el.data = _data;\r\n el.addEventListener('click', function () {\r\n\t\t\t\t//el.style.backgroundImage = 'url(img/' + select_img + ')';\r\n\r\n //alert(this.data.x+\"-\"+this.data.y);\r\n //console.log(this.data);\r\n })\r\n return el;\r\n }\r\n };\r\n}\r\n\r\nvar objects = [];\r\nfor(var i=0; i<countO;i++) {\r\n\r\n objects[i] = [];\r\n for(var j=0; j<countO;j++) {\r\n objects[i].push(new Hex(i,j));\r\n }\r\n}\r\n\r\n//var scripts= document.getElementsByTagName('script');\r\n//var mysrc= scripts[scripts.length-1].src;\r\n//alert(mysrc);\r\n\r\n\r\nvar cont = document.getElementById(\"container\");\r\n\r\ncont.style.height=hexH*.75*countO+'px';\r\ncont.style.width=hexW*countO+'px';\r\nvar posX = 0;\r\nvar posY = 0;\r\nvar R;\r\nvar windowH = window.innerHeight;\r\nvar windowW = window.innerWidth;\r\nvar scrollX = 0;\r\nvar scrollY = 0;\r\n\r\nwindow.addEventListener('scroll',function() {\r\n clearTimeout(R);\r\n R=setTimeout(render, 300);\r\n});\r\n\r\nvar render = function() {\r\n scrollY = window.pageYOffset || document.documentElement.scrollTop;\r\n scrollX = window.pageXOffset || document.documentElement.scrollLeft;\r\n windowW = window.innerWidth;\r\n windowH = window.innerHeight;\r\n\r\n cont.innerHTML = \"\";\r\n var obj;\r\n\r\n\tposX = Math.ceil(scrollX/hexW-1);\r\n posY = Math.ceil(scrollY/(hexH*0.75));\r\n\r\n\tvar countX = Math.ceil(windowW / hexW * 1.2);\r\n var countY = Math.ceil(windowH / hexH * 1.2);\r\n\r\n\tfor (var i = -2; i < countY ; i++) {\r\n\t\tfor (var j = -2; j < countX ; j++) {\r\n\t\t\tif(posY+i>=0 && posX+j>=0 && posY+i<countO && posX+j<countO){\r\n\t\t\t\tobj = objects[posY+i][posX+j];\r\n\t\t\t\tcont.appendChild(obj.getElement());\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n}\r\n\r\nvar eventScroll = new Event('scroll');\r\nwindow.dispatchEvent(eventScroll);\r\n\r\nfunction getRandomInt(min, max) {\r\n return Math.floor(Math.random() * (max - min)) + min;\r\n}\r\n" }, { "alpha_fraction": 0.7227723002433777, "alphanum_fraction": 0.7227723002433777, "avg_line_length": 18.200000762939453, "blob_id": "d2247a2f9ff2a90459e2629d7fea0d35204fcb3e", "content_id": "a7854f427bf181d5f83b85faf4ee867518d4ead1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 101, "license_type": "no_license", "max_line_length": 34, "num_lines": 5, "path": "/global_maps/apps.py", "repo_name": 
"sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\r\n\r\n\r\nclass GlobalmapsConfig(AppConfig):\r\n name = 'global_maps'\r\n" }, { "alpha_fraction": 0.6251627206802368, "alphanum_fraction": 0.6267013549804688, "avg_line_length": 32.337398529052734, "blob_id": "9e983c336f61a156d71b7df46fa8232a55207e0c", "content_id": "e9747f2c963933c91b55244432720f3abeaa1daa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8556, "license_type": "no_license", "max_line_length": 123, "num_lines": 246, "path": "/customer/views.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.shortcuts import render, redirect\r\nfrom django.views.decorators.csrf import csrf_exempt\r\n\r\nfrom django.contrib.auth import login, logout\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.models import User\r\n\r\nfrom django.shortcuts import render_to_response\r\nfrom django.http import HttpResponse, response\r\n\r\nfrom customer.forms import RegistrationForm\r\nfrom structure.models import Customer_Account, Country, Clan, World_version, User_settings\r\nfrom structure.forms import CreateAccountForm, CreateCountryForm, CreateClanForm\r\n\r\nfrom django.core.exceptions import ObjectDoesNotExist\r\n\r\n\r\ndef index(request):\r\n # return HttpResponse(\"<h3>Hello world</h3>\")\r\n return redirect ('customer/login')\r\n\r\n@csrf_exempt\r\ndef signup_view(request):\r\n # Number of visits to this view, as counted in the session variable.\r\n num_visits = request.session.get('num_visits', 0)\r\n request.session['num_visits'] = num_visits + 1\r\n\r\n if request.method == 'POST':\r\n form = RegistrationForm(request.POST)\r\n if form.is_valid():\r\n user = form.save()\r\n #log user here\r\n login(request, user)\r\n\r\n #устанавливаем мир по умолчанию = 1\r\n world = World_version.objects.all()\r\n settings = User_settings(customer=request.user, selected_world=world[0])\r\n settings.save()\r\n\r\n return render(request, 'customer/successful.html' )\r\n else:\r\n form = RegistrationForm()\r\n return render(request, 'customer/signup.html', {'form': form, 'num_visits':num_visits}, )\r\n\r\ndef login_view(request):\r\n if request.method == 'POST':\r\n form = AuthenticationForm(request.POST)\r\n if form.is_valid():\r\n user = form.get_user()\r\n login(request, user)\r\n if 'next' in request.POST:\r\n return redirect(request.POST.get('next'))\r\n else:\r\n return render(request, 'customer/successful.html')\r\n else:\r\n form = AuthenticationForm()\r\n return render(request, 'customer/login.html', {'form': form}, )\r\n\r\ndef logout_view(request):\r\n if request.method == 'POST':\r\n logout(request)\r\n return render(request, 'customer/successful.html')\r\n\r\n@login_required(login_url=\"/customer/login/\")\r\ndef profile_view(request):\r\n args = {}\r\n args['accounts'] = list_of_accounts(request)\r\n args['countries'] = list_of_countries(request)\r\n args['world_list'] = World_version.objects.all()\r\n args['selected_world'] = request.session.get('selected_world_num', 0)\r\n return render(request, 'profile/profile.html', args)\r\n\r\ndef select_world_view(request):\r\n if request.POST:\r\n\r\n #user = User.objects.get(id=request.user.id)\r\n\r\n selected_world_name = request.POST.get('world_number')\r\n world = World_version.objects.get(name = selected_world_name)\r\n\r\n try:\r\n settings = 
User_settings.objects.get(customer=request.user.id)\r\n settings.selected_world = world\r\n settings.save()\r\n except ObjectDoesNotExist:\r\n settings = User_settings(customer = request.user, selected_world = world)\r\n settings.save()\r\n\r\n request.session['selected_world_num'] = world.id\r\n else:\r\n request.session['selected_world_num'] = 'aaaa'\r\n\r\n return redirect('profile')\r\n\r\ndef password_reset_view(request):\r\n return render(request, 'customer/password_reset.html')\r\n\r\n''' CLASS '''\r\nclass MyAccounts:\r\n account_id = 1\r\n country = \"\"\r\n clan = \"\"\r\n account_name = \"\"\r\n world_version = \"\"\r\n\r\n def __init__(self, account_id, account_name, world_version, clan=\"-\", country=\"-\"):\r\n self.account_id = account_id\r\n self.account_name = account_name\r\n self.world_version = world_version\r\n self.country = country\r\n self.clan = clan\r\n\r\nclass MyCountry:\r\n country_id = 1\r\n country_name = \"\"\r\n num_of_accounts = 0\r\n\r\n def __init__(self, country_name, num_of_accounts, country_id):\r\n self.country_name = country_name\r\n self.num_of_accounts = num_of_accounts\r\n self.country_id = country_id\r\n\r\n\r\n'''DEF '''\r\ndef list_of_accounts(request):\r\n accounts = []\r\n\r\n settings = User_settings.objects.get(customer=request.user)\r\n list_of_accounts = Customer_Account.objects.filter(customer=request.user.id).filter(world = settings.selected_world.id)\r\n\r\n # list_of_free_accounts\r\n #list_of_accounts = Customer_Account.objects.filter(customer=request.user.id).filter(clan_id=None)\r\n\r\n for item in list_of_accounts:\r\n account_id = item.pk\r\n account_name = item.account_name\r\n world_version = item.world.name\r\n clan_name = \"-\"\r\n country_name = \"-\"\r\n\r\n if item.clan is not None:\r\n clan = Clan.objects.get(id__contains=item.clan.pk)\r\n clan_name = clan.clan_name\r\n\r\n country = Country.objects.get(id__contains=clan.country_id)\r\n country_name = country.country_name\r\n\r\n acc = MyAccounts(account_id, account_name, world_version, clan_name, country_name)\r\n accounts.append(acc)\r\n return accounts\r\n\r\ndef list_of_countries(request):\r\n countries = []\r\n countries_list = Country.objects.filter(customer_id=request.user.id)\r\n\r\n for item in countries_list:\r\n num_of_acc = 0\r\n clan_list = Clan.objects.filter(country_id=item.id)\r\n for clan_item in clan_list:\r\n acc = Customer_Account.objects.filter(clan_id=clan_item.id)\r\n num_of_acc = num_of_acc + acc.count()\r\n\r\n my_country = MyCountry(item.country_name, num_of_acc, item.id)\r\n countries.append(my_country)\r\n return countries\r\n\r\n\r\n'''ACCOUNT '''\r\n@csrf_exempt\r\ndef accounts_view(request):\r\n\r\n args = {}\r\n args['accounts'] = list_of_accounts(request)\r\n args['countries'] = list_of_countries(request)\r\n args['account_form'] = CreateAccountForm()\r\n args['country_form'] = CreateCountryForm()\r\n\r\n return render(request, 'account/accounts.html', args)\r\n\r\ndef account_detail_view(request, account_id=1):\r\n args = {}\r\n args['account'] = Customer_Account.objects.get(id = account_id)\r\n args['accounts'] = list_of_accounts(request)\r\n args['countries'] = list_of_countries(request)\r\n\r\n return render(request, 'account/account_detail.html', args)\r\n\r\n@csrf_exempt\r\ndef create_account_view(request):\r\n if request.POST:\r\n form = CreateAccountForm(request.POST)\r\n if form.is_valid():\r\n account = form.save(commit=False)\r\n user = User.objects.get(id__contains = request.user.id)\r\n account.customer_id = user.id\r\n 
form.save()\r\n return redirect('/customer/account/')\r\n return redirect('/')\r\n\r\n'''COUNTRY '''\r\ndef country_view(request):\r\n args = {}\r\n args['accounts'] = list_of_accounts(request)\r\n args['countries'] = list_of_countries(request)\r\n args['country_form'] = CreateCountryForm()\r\n args['clan_form'] = CreateClanForm()\r\n return render(request, 'country/country.html', args)\r\n\r\ndef create_country(request):\r\n if request.POST:\r\n form = CreateCountryForm(request.POST)\r\n\r\n if form.is_valid():\r\n #Поиск связанного аккаунта\r\n account_number = request.POST.get('account_number')\r\n account = Customer_Account.objects.get(id=account_number)\r\n\r\n # Создание страны\r\n new_country = form.save(commit=False)\r\n user = User.objects.get(id=request.user.id)\r\n new_country.customer_id = user.id\r\n new_country.world = account.world\r\n form.save()\r\n\r\n #Создание клана\r\n clan_name = request.POST.get('clan_name')\r\n new_clan = Clan(clan_name=clan_name, country=new_country )\r\n new_clan.save();\r\n\r\n #Присвоение клана для пользователя\r\n account.clan = new_clan\r\n account.save()\r\n\r\n return redirect('/customer/account/')\r\n return redirect('/')\r\n\r\n return render(request, 'profile/templates/country/country.html')\r\n\r\ndef country_detail_view(request, country_id=1):\r\n args = {}\r\n args['country'] = Country.objects.get(id = country_id)\r\n args['accounts'] = list_of_accounts(request)\r\n args['countries'] = list_of_countries(request)\r\n\r\n return render(request, 'country/country_detail.html', args)\r\n\r\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 24.399999618530273, "blob_id": "f5e96721f28109de4232057bf80e84e56d9cd0e8", "content_id": "6fa4fcc55113bf4a3f8cdc95784d0a9d282b5714", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 130, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/global_maps/admin.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom .models import Landscape, Cell\r\n\r\nadmin.site.register(Landscape)\r\nadmin.site.register(Cell)" }, { "alpha_fraction": 0.6434599161148071, "alphanum_fraction": 0.6434599161148071, "avg_line_length": 24.44444465637207, "blob_id": "f0a2cc2ee37f1a1c0fe6b9cfe8b25a5b09574568", "content_id": "7570db2c0d8e709e21192cb3389edf05ffb994d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 474, "license_type": "no_license", "max_line_length": 60, "num_lines": 18, "path": "/structure/forms.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django import forms\r\nfrom structure.models import Customer_Account, Country, Clan\r\n\r\n\r\nclass CreateAccountForm(forms.ModelForm):\r\n class Meta:\r\n model = Customer_Account\r\n fields = ['account_name', 'world']\r\n\r\nclass CreateCountryForm(forms.ModelForm):\r\n class Meta:\r\n model = Country\r\n fields = ['country_name']\r\n\r\nclass CreateClanForm(forms.ModelForm):\r\n class Meta:\r\n model = Clan\r\n fields = ['clan_name', 'country']" }, { "alpha_fraction": 0.48367953300476074, "alphanum_fraction": 0.5875371098518372, "avg_line_length": 18.823530197143555, "blob_id": "f5d0abbcf2ef3d37bc6dcb8e98b39f9824bf3ee5", "content_id": "560b77c0dc484ef49367b0d847167ff94caf3100", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 337, "license_type": "no_license", "max_line_length": 49, "num_lines": 17, "path": "/customer/migrations/0002_delete_customer.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-25 08:44\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('structure', '0006_auto_20181125_1044'),\n ('customer', '0001_initial'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Customer',\n ),\n ]\n" }, { "alpha_fraction": 0.6156716346740723, "alphanum_fraction": 0.6194030046463013, "avg_line_length": 17.14285659790039, "blob_id": "9869df5e543bf3ad46a135f9ee53d93d80143b83", "content_id": "686ca4403ee345875247febf670cc7da5283d6f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 268, "license_type": "no_license", "max_line_length": 52, "num_lines": 14, "path": "/global_maps/urls.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\r\nfrom django.urls import path\r\n\r\n\r\nfrom . import views\r\n\r\nurlpatterns = [\r\n url(r'^$', views.global_maps_view, name='global'),\r\n\r\n# url(r'^', views.index, name='index'),\r\n# url(r'^1/', views.kakogo, name='kakogo'),\r\n\r\n\r\n]\r\n" }, { "alpha_fraction": 0.5153374075889587, "alphanum_fraction": 0.5429447889328003, "avg_line_length": 27.636363983154297, "blob_id": "badfe3c302bd89cea7f376e4a2e94d09a5b03adf", "content_id": "d8c368284235c32bacf381f5e28107b933704226", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 652, "license_type": "no_license", "max_line_length": 137, "num_lines": 22, "path": "/structure/migrations/0001_initial.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-21 10:05\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n initial = True\r\n\r\n dependencies = [\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='World_version',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\r\n ('name', models.CharField(max_length=50)),\r\n ('status_of_world', models.CharField(choices=[('en', 'enable'), ('dis', 'disable'), ('c', 'comingsoon')], max_length=1)),\r\n ],\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5785413980484009, "alphanum_fraction": 0.5904628038406372, "avg_line_length": 28.869565963745117, "blob_id": "9bd153f04a88cecff5837cfb48012e77a74a2fd1", "content_id": "722de050e6c50b993765f3196057fa723c80364c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1426, "license_type": "no_license", "max_line_length": 82, "num_lines": 46, "path": "/global_maps/views.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\r\nfrom django.shortcuts import render_to_response\r\nfrom global_maps.models import Landscape, Cell\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\ndef index(request):\r\n return render(request, 'globalmaps/maps.html')\r\n\r\n@login_required(login_url=\"/account/login/\")\r\ndef global_maps_view(request):\r\n list_of_cells = Cell.objects.all()\r\n list_of_hex = [];\r\n\r\n for cell in list_of_cells:\r\n # my_landscape = Landscape.objects.filter(id__contains = 
cell.landscape_id)\r\n my_landscape = Landscape.objects.get(id__contains=cell.landscape_id)\r\n hex = Hex(cell.coord_x, cell.coord_y, my_landscape.img)\r\n list_of_hex.append(hex)\r\n\r\n return render_to_response('globalMaps/maps.html', {'list': list_of_hex})\r\n\r\n\r\n\r\nclass Hex:\r\n img = \"desert_hill.png\";\r\n coord_x = 1;\r\n coord_y = 0;\r\n hex_width = 105;\r\n hex_height = 123;\r\n\r\n #_class = \"hex \" + (_y % 2 == 0?\"even\":\"odd\");\r\n #_html = '<span>' + _x + \"-\" + _y + '</span>';\r\n\r\n def __init__(self, x, y, img=\"desrt_hill.png\"):\r\n self.coord_x = x\r\n self.coord_y = y\r\n self.img = img\r\n\r\n def top(self):\r\n return self.hex_height * 0.75 * self.coord_y\r\n\r\n def left(self):\r\n if(self.coord_y %2 ==0):\r\n return self.hex_width * self.coord_x\r\n else:\r\n return self.hex_width * self.coord_x + self.hex_width*0.5\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5107632279396057, "alphanum_fraction": 0.5753424763679504, "avg_line_length": 26.38888931274414, "blob_id": "770c326ed83fe1419b754c6e95a336a81a075d06", "content_id": "c00d24f43b06c5bb9db596af9ffabf57e5fbf66f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 139, "num_lines": 18, "path": "/structure/migrations/0003_auto_20181121_1210.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-21 10:10\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0002_auto_20181121_1208'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='world_version',\r\n name='status_of_world',\r\n field=models.CharField(choices=[('enable', '_enable'), ('disable', '_disable'), ('comingsoon', '_comingsoon')], max_length=10),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5126050710678101, "alphanum_fraction": 0.5546218752861023, "avg_line_length": 24.44444465637207, "blob_id": "eb12bd8f00db940e8dcf92ba35ba1a7d38a6eb45", "content_id": "3eb77909cccbd447166ae55a504bbb47c613f980", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 476, "license_type": "no_license", "max_line_length": 115, "num_lines": 18, "path": "/structure/migrations/0002_auto_20181121_1208.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-21 10:08\r\n\r\nfrom django.db import migrations, models\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0001_initial'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='world_version',\r\n name='status_of_world',\r\n field=models.CharField(choices=[('e', 'enable'), ('d', 'disable'), ('c', 'comingsoon')], max_length=1),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5885946750640869, "alphanum_fraction": 0.6099796295166016, "avg_line_length": 30.733333587646484, "blob_id": "459b38a668d67e744230f978dd376b951c6ae98b", "content_id": "5445234f011634e17818721ce1b488e3fe2d0f62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 982, "license_type": "no_license", "max_line_length": 122, "num_lines": 30, "path": "/structure/migrations/0017_auto_20181207_1416.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-12-07 12:16\r\n\r\nfrom django.conf 
import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\r\n ('structure', '0016_user_settings'),\r\n ]\r\n\r\n operations = [\r\n migrations.RemoveField(\r\n model_name='user_settings',\r\n name='world_by_default',\r\n ),\r\n migrations.AddField(\r\n model_name='user_settings',\r\n name='customer',\r\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\r\n ),\r\n migrations.AddField(\r\n model_name='user_settings',\r\n name='selected_world',\r\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='structure.World_version'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5597723126411438, "alphanum_fraction": 0.6204933524131775, "avg_line_length": 25.736841201782227, "blob_id": "8581e924b5a51cd5218906dea9dc28736624d7f1", "content_id": "615b66ab8f5b4c5f99557a73a81a24f46e79cbf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 122, "num_lines": 19, "path": "/structure/migrations/0018_auto_20181211_1218.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-12-11 10:18\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0017_auto_20181207_1416'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='user_settings',\r\n name='selected_world',\r\n field=models.ForeignKey(default=4, on_delete=django.db.models.deletion.CASCADE, to='structure.World_version'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 27.77777862548828, "blob_id": "b85c03fc5fc452cdd0022da2d843b162d248fbec", "content_id": "f0b4720c647cfa8e35966843f404e6629c0f5ce2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 66, "num_lines": 9, "path": "/structure/admin.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.contrib import admin\r\nfrom .models import World_version, Customer_Account, Country, Clan\r\n\r\n\r\n# Register your models here.\r\nadmin.site.register(World_version)\r\nadmin.site.register(Customer_Account)\r\nadmin.site.register(Country)\r\nadmin.site.register(Clan)" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6410256624221802, "avg_line_length": 24.899999618530273, "blob_id": "f883ef736b8174a835ceef3077e91e77d609ee8c", "content_id": "84b99b6f0db57ebb4bee5566f80435e5f49f09ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 66, "num_lines": 10, "path": "/customer/models.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\n'''\r\nclass Customer(models.Model):\r\n customer_name = models.CharField(max_length=100)\r\n password = models.CharField(max_length=50)\r\n\r\n def __str__(self):\r\n return 'Имя пользователя - {0}'.format(self.customer_name)\r\n'''\r\n\r\n\r\n" }, { "alpha_fraction": 0.4585152864456177, "alphanum_fraction": 
0.5036389827728271, "avg_line_length": 22.535715103149414, "blob_id": "dc8d7d98ef086b6f6586e2da25dccc5317d1452a", "content_id": "416f61a64cced16a0edc2d5a434fb77032146927", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 687, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/structure/migrations/0008_auto_20181128_1648.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-28 14:48\r\n\r\nfrom django.db import migrations\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0007_auto_20181125_1208'),\r\n ]\r\n\r\n operations = [\r\n migrations.RenameField(\r\n model_name='clan',\r\n old_name='name',\r\n new_name='clan_name',\r\n ),\r\n migrations.RenameField(\r\n model_name='country',\r\n old_name='name',\r\n new_name='country_name',\r\n ),\r\n migrations.RenameField(\r\n model_name='customer_account',\r\n old_name='name',\r\n new_name='account_name',\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.7300000190734863, "alphanum_fraction": 0.7300000190734863, "avg_line_length": 18, "blob_id": "6f4bf3bd8a67e324fdaa7f8551a784d13b565c9a", "content_id": "1dfd5700c391f4b3b57e76e0c8fbc14c4d66dc27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 36, "num_lines": 5, "path": "/customer/apps.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\r\n\r\n\r\nclass CustomerAuthConfig(AppConfig):\r\n name = 'customer'\r\n" }, { "alpha_fraction": 0.663484513759613, "alphanum_fraction": 0.663484513759613, "avg_line_length": 34.97058868408203, "blob_id": "e4c4aaa7cb70a603df680c455cb7efc074955717", "content_id": "3bc8aece59392700e32b31b91c0ee3e7cb8ffd1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1257, "license_type": "no_license", "max_line_length": 94, "num_lines": 34, "path": "/customer/urls.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.conf.urls import url, include\r\nfrom customer import views\r\nfrom django.contrib.auth.views import LoginView\r\nfrom django.urls import path\r\n\r\nurlpatterns = [\r\n\r\n url(r'^$', views.index, name='index'),\r\n url(r'^signup/$', views.signup_view, name='signup'),\r\n url(r'^login/$', LoginView.as_view(template_name='customer/login.html'), name='login'),\r\n # url(r'^login/$', views.login_view, name='login'),\r\n url(r'^logout/$', views.logout_view, name='logout'),\r\n\r\n url(r'^profile/$', views.profile_view, name='profile'),\r\n url(r'^select-world/$', views.select_world_view, name='select_world'),\r\n\r\n url(r'^password_reset/$', views.password_reset_view, name='password_reset'),\r\n\r\n\r\n\r\n\r\n url(r'^account/$', views.accounts_view, name='my_account'),\r\n url(r'^account/get/(?P<account_id>\\d+)$', views.account_detail_view, name='account_detail'),\r\n url(r'^account/create/$', views.create_account_view, name='create_account'),\r\n\r\n\r\n url(r'^country/$', views.country_view, name='my_country'),\r\n url(r'^country/get/(?P<country_id>\\d+)$', views.country_detail_view, name='country_detail'),\r\n url(r'^country/create/$', views.create_country, name='crate_country'),\r\n\r\n\r\n # url(r'^login/verifications/$', views.loginVerifications, name='index'),\r\n\r\n]\r\n" }, { "alpha_fraction": 
0.6268656849861145, "alphanum_fraction": 0.6451078057289124, "avg_line_length": 31.61111068725586, "blob_id": "dbc8c12401336880ade496ef886d1c1d76e76874", "content_id": "89a9f6cbb42db63e9a9b6c996394b1478ff22618", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 638, "license_type": "no_license", "max_line_length": 103, "num_lines": 18, "path": "/global_maps/models.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "from django.db import models\r\n\r\nclass Landscape(models.Model):\r\n name = models.CharField(max_length=50)\r\n img = models.CharField(max_length=50)\r\n\r\n def __str__(self):\r\n return 'Название - {0}, Картинка {1} '.format(self.name, self.img)\r\n\r\n\r\nclass Cell(models.Model):\r\n name = models.CharField(max_length=50)\r\n coord_x = models.IntegerField()\r\n coord_y = models.IntegerField()\r\n landscape = models.ForeignKey(Landscape, on_delete=models.CASCADE)\r\n\r\n def __str__(self):\r\n return 'Название:{0} Хоординаты Х\\Y - {1} \\ {2} '.format(self.name, self.coord_x, self.coord_y)" }, { "alpha_fraction": 0.5855072736740112, "alphanum_fraction": 0.616425096988678, "avg_line_length": 32.5, "blob_id": "fb4514b583c1ad5af67a03ffa6b2925daa2a280e", "content_id": "cc0189422d0f9a1b52def909fa7f336e2556ac7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 128, "num_lines": 30, "path": "/structure/migrations/0012_auto_20181129_1637.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-11-29 14:37\r\n\r\nfrom django.conf import settings\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0011_auto_20181129_1415'),\r\n ]\r\n\r\n operations = [\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='clan',\r\n field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='structure.Clan'),\r\n ),\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='customer',\r\n field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\r\n ),\r\n migrations.AlterField(\r\n model_name='customer_account',\r\n name='world',\r\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='structure.World_version'),\r\n ),\r\n ]\r\n" }, { "alpha_fraction": 0.5688350796699524, "alphanum_fraction": 0.5990923047065735, "avg_line_length": 29.4761905670166, "blob_id": "d0135a39c2b977c13e9622b3688d753c6ed34f0f", "content_id": "5cb4f17a4eafada5cd01ae22bf97fd78b2fb57ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 661, "license_type": "no_license", "max_line_length": 145, "num_lines": 21, "path": "/structure/migrations/0016_user_settings.py", "repo_name": "sergmalinov1/command_centr", "src_encoding": "UTF-8", "text": "# Generated by Django 2.1.2 on 2018-12-07 12:12\r\n\r\nfrom django.db import migrations, models\r\nimport django.db.models.deletion\r\n\r\n\r\nclass Migration(migrations.Migration):\r\n\r\n dependencies = [\r\n ('structure', '0015_country_customer'),\r\n ]\r\n\r\n operations = [\r\n migrations.CreateModel(\r\n name='User_settings',\r\n fields=[\r\n ('id', models.AutoField(auto_created=True, primary_key=True, 
serialize=False, verbose_name='ID')),\r\n ('world_by_default', models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='structure.World_version')),\r\n ],\r\n ),\r\n ]\r\n" } ]
31
bids-standard/bids-validator
https://github.com/bids-standard/bids-validator
2a0bce8b04b571f96a62f9e0d713b0db0645ce70
582ee39e7d9d48e255c69c19a6974881cb74b503
c6583845baa8154ac2e687d868bb084639847d40
refs/heads/master
2023-09-01T07:52:09.720201
2023-09-01T01:56:02
2023-09-01T01:56:02
37,161,308
121
68
MIT
2015-06-09T22:08:27
2023-09-08T16:58:30
2023-09-14T21:49:27
JavaScript
[ { "alpha_fraction": 0.6328011751174927, "alphanum_fraction": 0.6618287563323975, "avg_line_length": 24.518518447875977, "blob_id": "0e06816cffb87bdcc24ede5083d955edaa8e1253", "content_id": "4414b8a4bbcbd04d27783f85d50c27353eb2f6aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2756, "license_type": "permissive", "max_line_length": 79, "num_lines": 108, "path": "/bids-validator/src/schema/context.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from '../deps/asserts.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { BIDSContext } from './context.ts'\nimport { nullReadBytes } from '../tests/nullReadBytes.ts'\n\nconst issues = new DatasetIssues()\n\nconst text = () => Promise.resolve('')\n\nconst anatJson = () =>\n Promise.resolve(\n JSON.stringify({\n rootOverwrite: 'anat',\n subOverwrite: 'anat',\n anatValue: 'anat',\n }),\n )\nconst subjectJson = () =>\n Promise.resolve(\n JSON.stringify({ subOverwrite: 'subject', subValue: 'subject' }),\n )\nconst rootJson = () =>\n Promise.resolve(JSON.stringify({ rootOverwrite: 'root', rootValue: 'root' }))\n\nconst rootFileTree = new FileTree('/', '')\nconst subjectFileTree = new FileTree('/sub-01', 'sub-01', rootFileTree)\nconst sessionFileTree = new FileTree(\n '/sub-01/ses-01',\n 'ses-01',\n subjectFileTree,\n)\nconst anatFileTree = new FileTree(\n '/sub-01/ses-01/anat',\n 'anat',\n sessionFileTree,\n)\n\nconst dataFile = {\n text,\n path: '/sub-01/ses-01/anat/sub-01_ses-01_T1w.nii.gz',\n name: 'sub-01_ses-01_T1w.nii.gz',\n size: 311112,\n ignored: false,\n stream: new ReadableStream<Uint8Array>(),\n readBytes: nullReadBytes,\n}\n\nanatFileTree.files = [\n dataFile,\n {\n text: anatJson,\n path: '/sub-01/ses-01/anat/sub-01_ses-01_T1w.json',\n name: 'sub-01_ses-01_T1w.json',\n size: 311112,\n ignored: false,\n stream: new ReadableStream<Uint8Array>(),\n readBytes: nullReadBytes,\n },\n]\n\nsessionFileTree.files = []\nsessionFileTree.directories = [anatFileTree]\n\nsubjectFileTree.files = [\n {\n text: subjectJson,\n path: '/sub-01/ses-01_T1w.json',\n name: 'ses-01_T1w.json',\n size: 311112,\n ignored: false,\n stream: new ReadableStream<Uint8Array>(),\n readBytes: nullReadBytes,\n },\n]\nsubjectFileTree.directories = [sessionFileTree]\n\nrootFileTree.files = [\n {\n text: rootJson,\n path: '/T1w.json',\n name: 'T1w.json',\n size: 311112,\n ignored: false,\n stream: new ReadableStream<Uint8Array>(),\n readBytes: nullReadBytes,\n },\n]\nrootFileTree.directories = [subjectFileTree]\n\nconst context = new BIDSContext(anatFileTree, dataFile, issues)\n\nDeno.test('test context LoadSidecar', async (t) => {\n await context.loadSidecar(rootFileTree)\n await t.step('sidecar overwrites correct fields', () => {\n // @ts-expect-error\n const { rootOverwrite, subOverwrite } = context.sidecar\n assert(rootOverwrite, 'anat')\n assert(subOverwrite, 'anat')\n })\n await t.step('sidecar adds new fields at each level', () => {\n // @ts-expect-error\n const { rootValue, subValue, anatValue } = context.sidecar\n assert(rootValue, 'root')\n assert(subValue, 'subject')\n assert(anatValue, 'anat')\n })\n})\n" }, { "alpha_fraction": 0.5853873491287231, "alphanum_fraction": 0.591549277305603, "avg_line_length": 21.719999313354492, "blob_id": "a4635cf7b0f6f0fe63f69d1bf7b01b0a55bf50bb", "content_id": "b0ec039fb3c9986f90c567f6b985f391e4b0ae9c", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1136, "license_type": "permissive", "max_line_length": 58, "num_lines": 50, "path": "/bids-validator/esbuild.mjs", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import path from 'path'\nimport * as esbuild from 'esbuild'\nimport GlobalsPlugin from 'esbuild-plugin-globals'\n\n// Node.js target build\nawait esbuild.build({\n entryPoints: [\n path.join(process.cwd(), 'index.js'),\n path.join(process.cwd(), 'cli.js'),\n path.join(process.cwd(), 'utils', 'consoleFormat.js'),\n ],\n outdir: path.join(process.cwd(), 'dist', 'commonjs'),\n target: 'node18',\n bundle: true,\n sourcemap: true,\n platform: 'node',\n})\n\n// Browser target build\nawait esbuild.build({\n entryPoints: [path.join(process.cwd(), 'index.js')],\n outdir: path.join(process.cwd(), 'dist', 'esm'),\n bundle: true,\n sourcemap: true,\n format: 'esm',\n define: {\n global: 'globalThis',\n window: 'globalThis',\n crypto: 'globalThis',\n os: 'globalThis',\n timers: 'globalThis',\n process: JSON.stringify({\n env: {},\n argv: [],\n stdout: '',\n stderr: '',\n stdin: '',\n version: 'v12.14.1',\n }),\n },\n external: ['pluralize'],\n plugins: [\n GlobalsPlugin({\n crypto: 'globalThis',\n os: 'globalThis',\n timers: 'globalThis',\n process: 'globalThis',\n }),\n ],\n})\n" }, { "alpha_fraction": 0.6700000166893005, "alphanum_fraction": 0.7200000286102295, "avg_line_length": 24, "blob_id": "f59b121a706f00fdc00056455a8b57c2d1897097", "content_id": "31bb36bcff7a0e2c00e2dda7ee8d7e4917ceb76c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 100, "license_type": "permissive", "max_line_length": 53, "num_lines": 4, "path": "/bids-validator/src/deps/stream.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export {\n readAll,\n readerFromStreamReader,\n} from 'https://deno.land/[email protected]/streams/mod.ts'\n" }, { "alpha_fraction": 0.49275362491607666, "alphanum_fraction": 0.5057471394538879, "avg_line_length": 25.68000030517578, "blob_id": "5b11416d1361a6c1637f22fc8841054da5d5467d", "content_id": "88d32735eb3f83903558eab231d24da04d75e355", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2001, "license_type": "permissive", "max_line_length": 69, "num_lines": 75, "path": "/bids-validator/utils/modalities.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export default {\n /**\n * Group\n *\n * Takes an array of modalities and looks for\n * groupings defined in 'modalityGroups' and\n * replaces any perfectly matched groupings with\n * the grouping object key.\n */\n group: function (modalities) {\n var modalityGroups = [\n [['magnitude1', 'magnitude2', 'phase1', 'phase2'], 'fieldmap'],\n [['magnitude1', 'magnitude2', 'phasediff'], 'fieldmap'],\n [['magnitude1', 'phasediff'], 'fieldmap'],\n [['magnitude', 'fieldmap'], 'fieldmap'],\n [['epi'], 'fieldmap'],\n ]\n\n for (\n var groupTouple_i = 0;\n groupTouple_i < modalityGroups.length;\n groupTouple_i++\n ) {\n var groupSet = modalityGroups[groupTouple_i][0]\n var groupName = modalityGroups[groupTouple_i][1]\n var match = true\n for (var i = 0; i < groupSet.length; i++) {\n if (modalities.indexOf(groupSet[i]) === -1) {\n match = false\n }\n }\n if (match) {\n modalities.push(groupName)\n for (var j = 0; j < groupSet.length; j++) {\n 
modalities.splice(modalities.indexOf(groupSet[j]), 1)\n }\n }\n }\n\n return modalities\n },\n /*\n * Removed in #1122, but modified in BEP-009\n *\n * isCorrectModality: path => {\n * let isCorrectModality = false\n * // MRI\n * if (\n * path[0].includes('.nii') &&\n * ['anat', 'func', 'dwi', 'pet'].indexOf(path[1]) != -1\n * ) {\n * isCorrectModality = true\n * } else if (['.json', '.tsv'].some(v => path[0].includes(v))) {\n * const testPath = path[1]\n * switch (testPath) {\n * case 'meg':\n * // MEG\n * isCorrectModality = true\n * break\n * case 'eeg':\n * // EEG\n * isCorrectModality = true\n * break\n * case 'ieeg':\n * // iEEG\n * isCorrectModality = true\n * break\n * default:\n * break\n * }\n * }\n * return isCorrectModality\n * },\n */\n}\n" }, { "alpha_fraction": 0.737758457660675, "alphanum_fraction": 0.737758457660675, "avg_line_length": 33.03703689575195, "blob_id": "e53f1b647c40425c00e121b5870a7d9a09deb98a", "content_id": "84754cb95bb25be6fcf7f2a3ca07c0e0e9822245", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 919, "license_type": "permissive", "max_line_length": 84, "num_lines": 27, "path": "/bids-validator/src/schema/walk.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { BIDSContext, BIDSContextDataset } from './context.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\n\n/** Recursive algorithm for visiting each file in the dataset, creating a context */\nexport async function* _walkFileTree(\n fileTree: FileTree,\n root: FileTree,\n issues: DatasetIssues,\n dsContext?: BIDSContextDataset,\n): AsyncIterable<BIDSContext> {\n for (const file of fileTree.files) {\n yield new BIDSContext(root, file, issues, dsContext)\n }\n for (const dir of fileTree.directories) {\n yield* _walkFileTree(dir, root, issues, dsContext)\n }\n}\n\n/** Walk all files in the dataset and construct a context for each one */\nexport async function* walkFileTree(\n fileTree: FileTree,\n issues: DatasetIssues,\n dsContext?: BIDSContextDataset,\n): AsyncIterable<BIDSContext> {\n yield* _walkFileTree(fileTree, fileTree, issues, dsContext)\n}\n" }, { "alpha_fraction": 0.6281945705413818, "alphanum_fraction": 0.6512778401374817, "avg_line_length": 38.129032135009766, "blob_id": "6033bdf5771575651f4a8f75075ebdd2e35c0985", "content_id": "fc8ee6edc9ceae3f9f07777c546f65dc05559375", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1213, "license_type": "permissive", "max_line_length": 98, "num_lines": 31, "path": "/bids-validator/validators/tsv/__tests__/checkPhenotype.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport checkPhenotype from '../checkPhenotype'\n\ndescribe('checkPhenotype', () => {\n const summary = { subjects: ['01', '02'] }\n\n it('returns no issue if there are no phenotype participants provided', () => {\n const issues = checkPhenotype([], [])\n assert.lengthOf(issues, 0)\n })\n it('returns no issues if all phenotype participants are included in the summary object', () => {\n const phenotypeParticipants = [{ list: ['01', '02'] }]\n const issues = checkPhenotype(phenotypeParticipants, summary)\n assert.lengthOf(issues, 0)\n })\n it('returns issue code 51 if phenotype participants are not the same as subjects', () => {\n const phenotypeParticipants = [\n { file: 
'phenotype/test.tsv', list: ['01', '06'] },\n ]\n const issues = checkPhenotype(phenotypeParticipants, summary)\n assert.lengthOf(issues, 1)\n })\n it('returns issues for any mismatched participants.tsv files', () => {\n const phenotypeParticipants = [\n { file: 'phenotype/test_1.tsv', list: ['01', '06'] },\n { file: 'phenotype/test_2.tsv', list: ['01', '07'] },\n ]\n const issues = checkPhenotype(phenotypeParticipants, summary)\n assert.lengthOf(issues, 2)\n })\n})\n" }, { "alpha_fraction": 0.522606372833252, "alphanum_fraction": 0.522606372833252, "avg_line_length": 23.25806427001953, "blob_id": "d98b420cadf763daf268d3ee77718f4e126feff7", "content_id": "f85997262c6d514e58124f89668a53a03b4358e6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 752, "license_type": "permissive", "max_line_length": 70, "num_lines": 31, "path": "/bids-validator/validators/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// dependencies ------------------------------------------------------\nimport 'cross-fetch/polyfill'\n\nimport tsv from './tsv'\nimport json from './json'\nimport { NIFTI } from './nifti'\nimport { start as BIDS } from './bids'\nimport Events from './events'\nimport { bval } from './bval'\nimport { bvec } from './bvec'\nimport utils from '../utils'\nimport consoleFormat from '../utils/consoleFormat'\n\n// public api --------------------------------------------------------\n\nconst validate = {\n BIDS: BIDS,\n JSON: json.json,\n TSV: tsv,\n NIFTI,\n Events: Events,\n bval: bval,\n bvec: bvec,\n reformat: utils.issues.reformat,\n utils: utils,\n consoleFormat,\n}\n\n// exports -----------------------------------------------------------\n\nexport default validate\n" }, { "alpha_fraction": 0.631695032119751, "alphanum_fraction": 0.6414618492126465, "avg_line_length": 34.463687896728516, "blob_id": "699f2a9607973c26dbb0b2f2b166448a79ff830e", "content_id": "989018b2561793fa40a95eb91035d54a6ebb96fe", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6348, "license_type": "permissive", "max_line_length": 125, "num_lines": 179, "path": "/bids-validator/tests/session.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport utils from '../utils'\nconst Subject = utils.files.sessions.Subject\nimport {\n session as missingSessionFiles,\n getDataOrganization,\n getFilename,\n missingSessionWarnings,\n getSubjectFiles,\n missingFileWarnings,\n checkFileInMissingSession,\n checkMissingFile,\n} from '../validators/session'\nconst dir = process.cwd()\nconst data_dir = dir + '/bids-validator/tests/data/'\nconst missing_session_data = data_dir + 'ds006_missing-session'\n\ndescribe('session', () => {\n let filelist\n\n describe('missingSessionFiles', () => {\n describe('handling missing sessions', () => {\n beforeEach(async () => {\n filelist = await utils.files.readDir(missing_session_data)\n })\n\n it('should produce a single MISSING_SESSION warning', () => {\n const warnings = missingSessionFiles(filelist)\n const targetWarning = warnings.find(\n (warning) => warning.key === 'MISSING_SESSION',\n )\n assert.ok(targetWarning)\n })\n\n it('should not produce INCONSISTENT_SUBJECTS warnings', () => {\n const warnings = missingSessionFiles(filelist)\n warnings.forEach((warning) =>\n assert.notEqual(warning.key, 'INCONSISTENT_SUBJECTS'),\n )\n })\n })\n })\n\n 
describe('getDataOrganization', () => {\n it('should take a fileList of data with subjects and sessions and list and return them', async () => {\n let filelist\n await utils.files.readDir(missing_session_data).then((files) => {\n filelist = files\n })\n\n const { subjects, sessions } = getDataOrganization(filelist)\n assert.equal(typeof subjects, 'object')\n\n const subjKeys = Object.keys(subjects)\n assert.ok(subjKeys.length >= 1)\n assert.ok(subjKeys.every((key) => subjects[key] instanceof Subject))\n assert.ok(sessions.length >= 1)\n })\n })\n\n describe('getFilename', () => {\n it('should be able to extract the filename from its path', () => {\n const subjKey = 'sub-01'\n const paths = [\n '/sub-01/ses-post/anat/sub-01_ses-post_inplaneT2.nii.gz',\n '/sub-01/ses-post/anat/sub-01_ses-post_T1w.nii.gz',\n '/sub-01/ses-post/func/sub-01_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz',\n ]\n const expecteds = [\n '/ses-post/anat/<sub>_ses-post_inplaneT2.nii.gz',\n '/ses-post/anat/<sub>_ses-post_T1w.nii.gz',\n '/ses-post/func/<sub>_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz',\n ]\n\n for (let i = 0; i < paths.length; i++) {\n const result = getFilename(paths[i], subjKey)\n assert.equal(result, expecteds[i])\n }\n })\n })\n\n describe('missingSessionWarnings', () => {\n it('should take a subjects dir and a sessions list and return a list of issues', async () => {\n let filelist\n await utils.files.readDir(missing_session_data).then((files) => {\n filelist = files\n })\n const { subjects, sessions } = getDataOrganization(filelist)\n\n const sessionWarnings = missingSessionWarnings(subjects, sessions)\n assert.ok(Array.isArray(sessionWarnings))\n assert.ok(\n sessionWarnings.every(\n (warning) => warning instanceof utils.issues.Issue,\n ),\n )\n })\n })\n\n describe('getSubjectFiles', () => {\n it('should take a list of subjects and return a set containing each file', async () => {\n let filelist\n await utils.files.readDir(missing_session_data).then((files) => {\n filelist = files\n })\n const { subjects } = getDataOrganization(filelist)\n\n const subjFiles = getSubjectFiles(subjects)\n assert.ok(subjFiles.every((filename) => typeof filename === 'string'))\n assert.equal(subjFiles.length, new Set(subjFiles).size)\n\n const allFiles = Object.keys(subjects).reduce(\n (allFiles, subjKey) => allFiles.concat(subjects[subjKey].files),\n [],\n )\n assert.ok(allFiles.every((file) => subjFiles.includes(file)))\n })\n })\n\n describe('missingFileWarnings', () => {\n it('generates an issue for each file missing from each subject and returns them as a list', () => {\n const subjects = {}\n const subjKey = 'sub-01'\n const subject01 = new Subject()\n const subjFiles = [\n '/ses-post/anat/<sub>_ses-post_inplaneT2.nii.gz',\n '/ses-post/anat/<sub>_ses-post_T1w.nii.gz',\n '/ses-post/func/<sub>_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz',\n ]\n subject01.files.push(subjFiles[0])\n subjects[subjKey] = subject01\n\n const warnings = missingFileWarnings(subjects, subjFiles)\n assert.ok(Array.isArray(warnings))\n warnings.every(\n (warning) =>\n warning instanceof utils.issues.Issue && warning.code === 38,\n )\n })\n })\n\n describe('checkFileInMissingSession', () => {\n it('returns true if filepath belongs to missing session', () => {\n const filepath = '/sub-01/ses-post/anat/sub-01_ses-post_inplaneT2.nii.gz'\n const subject = new Subject()\n subject.missingSessions.push('ses-post')\n\n const 
inMissingSession = checkFileInMissingSession(filepath, subject)\n assert.strictEqual(inMissingSession, true)\n })\n it('returns false if filepath belongs to extant session', () => {\n const filepath = '/sub-01/ses-post/anat/sub-01_ses-post_inplaneT2.nii.gz'\n const subject = new Subject()\n subject.sessions.push('ses-post')\n\n const inMissingSession = checkFileInMissingSession(filepath, subject)\n assert.strictEqual(inMissingSession, false)\n })\n })\n\n describe('checkMissingFile', () => {\n it('returns an issue if filename is missing from subject', () => {\n const subject = new Subject()\n const subjKey = 'sub-01'\n const filenames = [\n '/ses-post/anat/<sub>_ses-post_inplaneT2.nii.gz',\n '/ses-post/anat/<sub>_ses-post_T1w.nii.gz',\n '/ses-post/func/<sub>_ses-post_task-livingnonlivingdecisionwithplainormirrorreversedtext_run-01_bold.nii.gz',\n ]\n\n assert.equal(subject.files.length, 0)\n filenames.forEach((filename) => {\n const warning = checkMissingFile(subject, subjKey, filename)\n assert.ok(warning instanceof utils.issues.Issue)\n assert.equal(warning.code, 38)\n })\n })\n })\n})\n" }, { "alpha_fraction": 0.6760509014129639, "alphanum_fraction": 0.691827654838562, "avg_line_length": 47.26226806640625, "blob_id": "eaf1c59a33534bf9896df7a0a81541c06d261943", "content_id": "b83390cbccc7a368a917282b0c7662483d2a7ae3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 57067, "license_type": "permissive", "max_line_length": 923, "num_lines": 1182, "path": "/bids-validator/utils/issues/list.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Issues\n *\n * A list of all possible issues organized by\n * issue code and including severity and reason\n * agnostic to file specifics.\n */\nexport default {\n 0: {\n key: 'INTERNAL ERROR',\n severity: 'error',\n reason: 'Internal error. SOME VALIDATION STEPS MAY NOT HAVE OCCURRED',\n },\n 1: {\n key: 'NOT_INCLUDED',\n severity: 'error',\n reason:\n 'Files with such naming scheme are not part of BIDS specification. This error is most commonly ' +\n 'caused by typos in file names that make them not BIDS compatible. Please consult the specification and ' +\n 'make sure your files are named correctly. If this is not a file naming issue (for example when including ' +\n 'files not yet covered by the BIDS specification) you should include a \".bidsignore\" file in your dataset (see' +\n ' https://github.com/bids-standard/bids-validator#bidsignore for details). 
Please ' +\n 'note that derived (processed) data should be placed in /derivatives folder and source data (such as DICOMS ' +\n 'or behavioural logs in proprietary formats) should be placed in the /sourcedata folder.',\n },\n 2: {\n key: 'REPETITION_TIME_GREATER_THAN',\n severity: 'warning',\n reason:\n \"'RepetitionTime' is greater than 100 are you sure it's expressed in seconds?\",\n },\n 3: {\n key: 'ECHO_TIME_GREATER_THAN',\n severity: 'warning',\n reason:\n \"'EchoTime' is greater than 1 are you sure it's expressed in seconds?\",\n },\n 4: {\n key: 'ECHO_TIME_DIFFERENCE_GREATER_THAN',\n severity: 'warning',\n reason:\n \"'EchoTimeDifference' is greater than 1 are you sure it's expressed in seconds?\",\n },\n 5: {\n key: 'TOTAL_READOUT_TIME_GREATER_THAN',\n severity: 'warning',\n reason:\n \"'TotalReadoutTime' is greater than 10 are you sure it's expressed in seconds?\",\n },\n 6: {\n key: 'ECHO_TIME_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'EchoTime' for this file. If you don't provide this information field map correction will not be possible.\",\n },\n 7: {\n key: 'PHASE_ENCODING_DIRECTION_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'PhaseEncodingDirection' for this file. If you don't provide this information field map correction will not be possible.\",\n },\n 8: {\n key: 'EFFECTIVE_ECHO_SPACING_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'EffectiveEchoSpacing' for this file. If you don't provide this information field map correction will not be possible.\",\n },\n 9: {\n key: 'TOTAL_READOUT_TIME_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'TotalReadoutTime' for this file. If you don't provide this information field map correction using TOPUP might not be possible.\",\n },\n 10: {\n key: 'REPETITION_TIME_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'RepetitionTime' for this file.\",\n },\n 11: {\n key: 'REPETITION_TIME_UNITS',\n severity: 'error',\n reason:\n \"Repetition time was not defined in seconds, milliseconds or microseconds in the scan's header.\",\n },\n 12: {\n key: 'REPETITION_TIME_MISMATCH',\n severity: 'error',\n reason:\n \"Repetition time did not match between the scan's header and the associated JSON metadata file.\",\n },\n 13: {\n key: 'SLICE_TIMING_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'SliceTiming' for this file. If you don't provide this information slice time correction will not be possible. 'Slice Timing' is the time at which each slice was acquired within each volume (frame) of the acquisition. 
Slice timing is not slice order -- rather, it is a list of times containing the time (in seconds) of each slice acquisition in relation to the beginning of volume acquisition.\",\n },\n 15: {\n key: 'ECHO_TIME1-2_NOT_DEFINED',\n severity: 'error',\n reason: \"You have to define 'EchoTime1' and 'EchoTime2' for this file.\",\n },\n 16: {\n key: 'ECHO_TIME_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'EchoTime' for this file.\",\n },\n 17: {\n key: 'UNITS_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'Units' for this file.\",\n },\n 18: {\n key: 'PHASE_ENCODING_DIRECTION_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'PhaseEncodingDirection' for this file.\",\n },\n 19: {\n key: 'TOTAL_READOUT_TIME_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'TotalReadoutTime' for this file.\",\n },\n 20: {\n key: 'EVENTS_COLUMN_ONSET',\n severity: 'error',\n reason: \"First column of the events file must be named 'onset'\",\n },\n 21: {\n key: 'EVENTS_COLUMN_DURATION',\n severity: 'error',\n reason: \"Second column of the events file must be named 'duration'\",\n },\n 22: {\n key: 'TSV_EQUAL_ROWS',\n severity: 'error',\n reason:\n 'All rows must have the same number of columns as there are headers.',\n },\n 23: {\n key: 'TSV_EMPTY_CELL',\n severity: 'error',\n reason:\n 'Empty cell in TSV file detected: The proper way of labeling missing values is \"n/a\".',\n },\n 24: {\n key: 'TSV_IMPROPER_NA',\n severity: 'warning',\n reason: 'A proper way of labeling missing values is \"n/a\".',\n },\n 25: {\n key: 'EVENTS_TSV_MISSING',\n severity: 'warning',\n reason:\n 'Task scans should have a corresponding events.tsv file. If this is a resting state scan you can ignore this warning or rename the task to include the word \"rest\".',\n },\n 26: {\n key: 'NIFTI_HEADER_UNREADABLE',\n severity: 'error',\n reason:\n 'We were unable to parse header data from this NIfTI file. Please ensure it is not corrupted or mislabeled.',\n },\n 27: {\n key: 'JSON_INVALID',\n severity: 'error',\n reason: 'Not a valid JSON file.',\n },\n 28: {\n key: 'GZ_NOT_GZIPPED',\n severity: 'error',\n reason: 'This file ends in the .gz extension but is not actually gzipped.',\n },\n 29: {\n key: 'VOLUME_COUNT_MISMATCH',\n severity: 'error',\n reason:\n 'The number of volumes in this scan does not match the number of volumes in the corresponding .bvec and .bval files.',\n },\n 30: {\n key: 'BVAL_MULTIPLE_ROWS',\n severity: 'error',\n reason: '.bval files should contain exactly one row of volumes.',\n },\n 31: {\n key: 'BVEC_NUMBER_ROWS',\n severity: 'error',\n reason: '.bvec files should contain exactly three rows of volumes.',\n },\n 32: {\n key: 'DWI_MISSING_BVEC',\n severity: 'error',\n reason: 'DWI scans should have a corresponding .bvec file.',\n },\n 33: {\n key: 'DWI_MISSING_BVAL',\n severity: 'error',\n reason: 'DWI scans should have a corresponding .bval file.',\n },\n 36: {\n key: 'NIFTI_TOO_SMALL',\n severity: 'error',\n reason: 'This file is too small to contain the minimal NIfTI header.',\n },\n 37: {\n key: 'INTENDED_FOR',\n severity: 'error',\n reason: \"'IntendedFor' field needs to point to an existing file.\",\n },\n 38: {\n key: 'INCONSISTENT_SUBJECTS',\n severity: 'warning',\n reason:\n 'Not all subjects contain the same files. 
Each subject should contain the same number of files with ' +\n 'the same naming unless some files are known to be missing.',\n },\n 39: {\n key: 'INCONSISTENT_PARAMETERS',\n severity: 'warning',\n reason: 'Not all subjects/sessions/runs have the same scanning parameters.',\n },\n 40: {\n key: 'NIFTI_DIMENSION',\n severity: 'warning',\n reason:\n \"NIfTI file's header field for dimension information blank or too short.\",\n },\n 41: {\n key: 'NIFTI_UNIT',\n severity: 'warning',\n reason:\n \"NIfTI file's header field for unit information for x, y, z, and t dimensions empty or too short\",\n },\n 42: {\n key: 'NIFTI_PIXDIM',\n severity: 'warning',\n reason:\n \"NIfTI file's header field for pixel dimension information empty or too short.\",\n },\n 43: {\n key: 'ORPHANED_SYMLINK',\n severity: 'error',\n reason:\n 'This file appears to be an orphaned symlink. Make sure it correctly points to its referent.',\n },\n 44: {\n key: 'FILE_READ',\n severity: 'error',\n reason:\n 'We were unable to read this file. Make sure it contains data (file size > 0 kB) and is not corrupted, incorrectly named, or incorrectly symlinked.',\n },\n 45: {\n key: 'SUBJECT_FOLDERS',\n severity: 'error',\n reason:\n 'There are no subject folders (labeled \"sub-*\") in the root of this dataset.',\n },\n 46: {\n key: 'BVEC_ROW_LENGTH',\n severity: 'error',\n reason:\n 'Each row in a .bvec file should contain the same number of values.',\n },\n 47: {\n key: 'B_FILE',\n severity: 'error',\n reason:\n '.bval and .bvec files must be single space delimited and contain only numerical values.',\n },\n 48: {\n key: 'PARTICIPANT_ID_COLUMN',\n severity: 'error',\n reason:\n \"Participants and phenotype .tsv files must have a 'participant_id' column.\",\n },\n 49: {\n key: 'PARTICIPANT_ID_MISMATCH',\n severity: 'error',\n reason:\n 'Participant labels found in this dataset did not match the values in participant_id column found in the participants.tsv file.',\n },\n 50: {\n key: 'TASK_NAME_MUST_DEFINE',\n severity: 'error',\n reason: \"You have to define 'TaskName' for this file.\",\n },\n 51: {\n key: 'PHENOTYPE_SUBJECTS_MISSING',\n severity: 'error',\n reason:\n 'A phenotype/ .tsv file lists subjects that were not found in the dataset.',\n },\n 52: {\n key: 'STIMULUS_FILE_MISSING',\n severity: 'error',\n reason: 'A stimulus file was declared but not found in the dataset.',\n },\n 53: {\n key: 'NO_T1W',\n severity: 'ignore',\n reason: 'Dataset does not contain any T1w scans.',\n },\n 54: {\n key: 'BOLD_NOT_4D',\n severity: 'error',\n reason: 'Bold scans must be 4 dimensional.',\n },\n 55: {\n key: 'JSON_SCHEMA_VALIDATION_ERROR',\n severity: 'error',\n reason:\n 'Invalid JSON file. The file is not formatted according the schema.',\n },\n 56: {\n key: 'Participants age 89 or higher',\n severity: 'warning',\n reason:\n 'As per section 164.514(C) of \"The De-identification Standard\" under HIPAA guidelines, participants with age 89 or higher should be tagged as 89+. More information can be found at https://www.hhs.gov/hipaa/for-professionals/privacy/special-topics/de-identification/#standard',\n },\n 57: {\n key: 'DATASET_DESCRIPTION_JSON_MISSING',\n severity: 'error',\n reason:\n 'The compulsory file /dataset_description.json is missing. See Section 03 (Modality agnostic files) of the BIDS specification.',\n },\n 58: {\n key: 'TASK_NAME_CONTAIN_ILLEGAL_CHARACTER',\n severity: 'error',\n reason:\n 'Task Name contain an Illegal Character hyphen or underscore. 
Please edit the filename as per BIDS spec.',\n },\n 59: {\n key: 'ACQ_NAME_CONTAIN_ILLEGAL_CHARACTER',\n severity: 'error',\n reason:\n 'acq Name contain an Illegal Character hyphen or underscore. Please edit the filename as per BIDS spec.',\n },\n 60: {\n key: 'SFORM_AND_QFORM_IN_IMAGE_HEADER_ARE_ZERO',\n severity: 'error',\n reason:\n 'sform_code and qform_code in the image header are 0. The image/file will be considered invalid or assumed to be in LAS orientation.',\n },\n 61: {\n key: 'QUICK_VALIDATION_FAILED',\n severity: 'error',\n reason:\n 'Quick validation failed - the general folder structure does not resemble a BIDS dataset. Have you chosen the right folder (with \"sub-*/\" subfolders)? Check for structural/naming issues and presence of at least one subject.',\n },\n 62: {\n key: 'SUBJECT_VALUE_CONTAINS_ILLEGAL_CHARACTER',\n severity: 'error',\n reason:\n 'Sub label contain an Illegal Character hyphen or underscore. Please edit the filename as per BIDS spec.',\n },\n 63: {\n key: 'SESSION_VALUE_CONTAINS_ILLEGAL_CHARACTER',\n severity: 'error',\n reason:\n 'Ses label contain an Illegal Character hyphen or underscore. Please edit the filename as per BIDS spec.',\n },\n 64: {\n key: 'SUBJECT_LABEL_IN_FILENAME_DOESNOT_MATCH_DIRECTORY',\n severity: 'error',\n reason:\n \"Subject label in the filename doesn't match with the path of the file. File seems to be saved in incorrect subject directory.\",\n },\n 65: {\n key: 'SESSION_LABEL_IN_FILENAME_DOESNOT_MATCH_DIRECTORY',\n severity: 'error',\n reason:\n \"Session label in the filename doesn't match with the path of the file. File seems to be saved in incorrect session directory.\",\n },\n 66: {\n key: 'SLICETIMING_VALUES_GREATOR_THAN_REPETITION_TIME',\n severity: 'error',\n reason:\n '\"SliceTiming\" value/s contains invalid value as it is greater than RepetitionTime. SliceTiming values should be in seconds not milliseconds (common mistake).',\n },\n 67: {\n key: 'NO_VALID_DATA_FOUND_FOR_SUBJECT',\n severity: 'error',\n reason: 'No BIDS compatible data found for at least one subject.',\n },\n 68: {\n key: 'FILENAME_COLUMN',\n severity: 'error',\n reason: \"_scans.tsv files must have a 'filename' column.\",\n },\n 70: {\n key: 'WRONG_NEW_LINE',\n severity: 'error',\n reason:\n \"All TSV files must use Line Feed '\\\\n' characters to denote new lines. 
This files uses Carriage Return '\\\\r'.\",\n },\n 71: {\n key: 'MISSING_TSV_COLUMN_CHANNELS',\n severity: 'error',\n reason:\n \"The column names of the channels file must begin with ['name', 'type', 'units']\",\n },\n 72: {\n key: 'MISSING_TSV_COLUMN_IEEG_CHANNELS',\n severity: 'error',\n reason:\n \"The column names of the channels file must begin with ['name', 'type', 'units', 'low_cutoff', 'high_cutoff']\",\n },\n 73: {\n key: 'MISSING_TSV_COLUMN_IEEG_ELECTRODES',\n severity: 'error',\n reason:\n \"The column names of the electrodes file must begin with ['name', 'x', 'y', 'z', 'size']\",\n },\n 74: {\n key: 'DUPLICATE_NIFTI_FILES',\n severity: 'error',\n reason: \"NIfTI file exist with both '.nii' and '.nii.gz' extensions.\",\n },\n 75: {\n key: 'NIFTI_PIXDIM4',\n severity: 'error',\n reason: \"NIfTI file's header is missing time dimension information.\",\n },\n 76: {\n key: 'EFFECTIVEECHOSPACING_TOO_LARGE',\n severity: 'error',\n reason: \"Abnormally high value of 'EffectiveEchoSpacing'.\",\n },\n 77: {\n key: 'UNUSED_STIMULUS',\n severity: 'warning',\n reason:\n 'There are files in the /stimuli directory that are not utilized in any _events.tsv file.',\n },\n 78: {\n key: 'CHANNELS_COLUMN_SFREQ',\n severity: 'error',\n reason:\n \"Fourth column of the channels file must be named 'sampling_frequency'\",\n },\n 79: {\n key: 'CHANNELS_COLUMN_LOWCUT',\n severity: 'error',\n reason: \"Third column of the channels file must be named 'low_cutoff'\",\n },\n 80: {\n key: 'CHANNELS_COLUMN_HIGHCUT',\n severity: 'error',\n reason: \"Third column of the channels file must be named 'high_cutoff'\",\n },\n 81: {\n key: 'CHANNELS_COLUMN_NOTCH',\n severity: 'error',\n reason: \"Third column of the channels file must be named 'notch'\",\n },\n 82: {\n key: 'CUSTOM_COLUMN_WITHOUT_DESCRIPTION',\n severity: 'warning',\n reason:\n 'Tabular file contains custom columns not described in a data dictionary',\n },\n 83: {\n key: 'ECHOTIME1_2_DIFFERENCE_UNREASONABLE',\n severity: 'error',\n reason:\n 'The value of (EchoTime2 - EchoTime1) should be within the range of 0.0001 - 0.01.',\n },\n 84: {\n key: 'ACQTIME_FMT',\n severity: 'error',\n reason:\n 'Entries in the \"acq_time\" column of _scans.tsv should be expressed in the following format YYYY-MM-DDTHH:mm:ss[.000000] (year, month, day, hour (24h), minute, second, and optionally fractional second; this is equivalent to the RFC3339 \"date-time\" format.',\n },\n 85: {\n key: 'SUSPICIOUSLY_LONG_EVENT_DESIGN',\n severity: 'warning',\n reason:\n 'The onset of the last event is after the total duration of the corresponding scan. This design is suspiciously long. ',\n },\n 86: {\n key: 'SUSPICIOUSLY_SHORT_EVENT_DESIGN',\n severity: 'warning',\n reason:\n 'The onset of the last event is less than half the total duration of the corresponding scan. This design is suspiciously short. ',\n },\n 87: {\n key: 'SLICETIMING_ELEMENTS',\n severity: 'warning',\n reason:\n \"The number of elements in the SliceTiming array should match the 'k' dimension of the corresponding NIfTI volume.\",\n },\n 88: {\n key: 'MALFORMED_BVEC',\n severity: 'error',\n reason:\n 'The contents of this .bvec file are undefined or severely malformed. ',\n },\n 89: {\n key: 'MALFORMED_BVAL',\n severity: 'error',\n reason:\n 'The contents of this .bval file are undefined or severely malformed. 
',\n },\n 90: {\n key: 'SIDECAR_WITHOUT_DATAFILE',\n severity: 'error',\n reason: 'A json sidecar file was found without a corresponding data file',\n },\n 91: {\n key: '_FIELDMAP_WITHOUT_MAGNITUDE_FILE',\n severity: 'error',\n reason:\n '_fieldmap.nii[.gz] file does not have accompanying _magnitude.nii[.gz] file. ',\n },\n 92: {\n key: 'MISSING_MAGNITUDE1_FILE',\n severity: 'warning',\n reason:\n 'Each _phasediff.nii[.gz] file should be associated with a _magnitude1.nii[.gz] file.',\n },\n 93: {\n key: 'EFFECTIVEECHOSPACING_LARGER_THAN_TOTALREADOUTTIME',\n severity: 'error',\n reason:\n 'EffectiveEchoSpacing should always be smaller than TotalReadoutTime. ',\n },\n 94: {\n key: 'MAGNITUDE_FILE_WITH_TOO_MANY_DIMENSIONS',\n severity: 'error',\n reason:\n '_magnitude1.nii[.gz] and _magnitude2.nii[.gz] files must have exactly three dimensions. ',\n },\n 95: {\n key: 'T1W_FILE_WITH_TOO_MANY_DIMENSIONS',\n severity: 'error',\n reason: '_T1w.nii[.gz] files must have exactly three dimensions. ',\n },\n 96: {\n key: 'MISSING_TSV_COLUMN_EEG_ELECTRODES',\n severity: 'error',\n reason:\n \"The column names of the electrodes file must begin with ['name', 'x', 'y', 'z']\",\n },\n 97: {\n key: 'MISSING_SESSION',\n severity: 'warning',\n reason: 'Not all subjects contain the same sessions.',\n },\n 98: {\n key: 'INACCESSIBLE_REMOTE_FILE',\n severity: 'error',\n reason:\n 'This file appears to be a symlink to a remote annexed file but could not be accessed from any of the configured remotes.',\n },\n 99: {\n key: 'EMPTY_FILE',\n severity: 'error',\n reason: 'Empty files not allowed.',\n },\n 100: {\n key: 'BRAINVISION_LINKS_BROKEN',\n severity: 'error',\n reason:\n 'Internal file pointers in BrainVision file triplet (*.eeg, *.vhdr, and *.vmrk) are broken or some files do not exist.',\n },\n 101: {\n key: 'README_FILE_MISSING',\n severity: 'warning',\n reason:\n 'The recommended file /README is missing. See Section 03 (Modality agnostic files) of the BIDS specification.',\n },\n 102: {\n key: 'TOO_FEW_AUTHORS',\n severity: 'warning',\n reason:\n 'The Authors field of dataset_description.json should contain an array of fields - with one author per field. This was triggered based on the presence of only one author field. Please ignore if all contributors are already properly listed.',\n },\n 103: {\n key: 'MULTIPLE_COMMAS_IN_AUTHOR_FIELD',\n severity: 'error',\n reason:\n 'The Authors field of dataset_description.json should contain an array of fields - with one author per field. This was triggered based on the presence of multiple commas in a single author field. Please ensure your authors are properly formatted.',\n },\n 104: {\n key: 'HED_ERROR',\n severity: 'error',\n reason: 'The validation on this HED string returned an error.',\n },\n 105: {\n key: 'HED_WARNING',\n severity: 'warning',\n reason: 'The validation on this HED string returned a warning.',\n },\n 106: {\n key: 'HED_INTERNAL_ERROR',\n severity: 'error',\n reason: 'An internal error occurred during HED validation.',\n },\n 107: {\n key: 'HED_INTERNAL_WARNING',\n severity: 'warning',\n reason: 'An internal warning occurred during HED validation.',\n },\n 108: {\n key: 'HED_MISSING_VALUE_IN_SIDECAR',\n severity: 'warning',\n reason:\n 'The json sidecar does not contain this column value as a possible key to a HED string.',\n },\n 109: {\n key: 'HED_VERSION_NOT_DEFINED',\n severity: 'warning',\n reason:\n \"You should define 'HEDVersion' for this file. 
If you don't provide this information, the HED validation will use the latest version available.\",\n },\n 113: {\n key: 'NO_AUTHORS',\n severity: 'warning',\n reason:\n 'The Authors field of dataset_description.json should contain an array of fields - with one author per field. This was triggered because there are no authors, which will make DOI registration from dataset metadata impossible.',\n },\n 114: {\n key: 'INCOMPLETE_DATASET',\n severity: 'error',\n reason:\n 'This dataset contains remote files. If you would like to validate with remote files, use the --remoteFiles option.',\n },\n 115: {\n key: 'EMPTY_DATASET_NAME',\n severity: 'warning',\n reason:\n 'The Name field of dataset_description.json is present but empty of visible characters.',\n },\n 123: {\n key: 'INVALID JSON ENCODING',\n severity: 'error',\n reason: 'JSON files must be valid utf-8.',\n },\n 124: {\n key: 'INVALID_TSV_UNITS',\n severity: 'error',\n reason:\n 'Units in .tsv files must be valid SI units as described in the BIDS spec Appendix V (https://bids-specification.readthedocs.io/en/stable/99-appendices/05-units.html).',\n },\n 125: {\n key: 'CHANNELS_COLUMN_STATUS',\n severity: 'error',\n reason:\n 'Status column in channels.tsv files must contain only one of two values: good or bad. Per the BIDS spec: (https://bids-specification.readthedocs.io/en/stable/04-modality-specific-files/04-intracranial-electroencephalography.html#channels-description-_channelstsv).',\n },\n 126: {\n key: 'MISSING_TSV_COLUMN_TIME',\n severity: 'error',\n reason: '*_blood.tsv require a time column.',\n },\n 127: {\n key: 'NOT_IN_USE',\n severity: 'error',\n reason: 'Code 127 is currently not used or reserved.',\n },\n 128: {\n key: 'NO_GENETIC_DATABASE',\n severity: 'error',\n reason:\n 'A genetic_info.json file is present but no Database field present in Genetics object in dataset_description.json.',\n },\n 129: {\n key: 'SCANS_FILENAME_NOT_MATCH_DATASET',\n severity: 'error',\n reason:\n 'The filename in scans.tsv file does not match what is present in the BIDS dataset.',\n },\n 130: {\n key: 'CHANNELS_COLUMN_TYPE_UPPER_CASE',\n severity: 'error',\n reason:\n 'Type column in channels.tsv files should consist of upper-case characters.',\n },\n 131: {\n key: 'CHANNELS_COLUMN_TYPE',\n severity: 'error',\n reason:\n 'Type column in channels.tsv files should only consist of values allowed in the specification for MEG/EEG/iEEG data.',\n },\n 133: {\n key: 'LABELING_TYPE_MUST_DEFINE',\n severity: 'error',\n reason:\n \"You should define 'ArterialSpinLabelingType' for this file. 'ArterialSpinLabelingType' can be CASL, PCASL, PASL.\",\n },\n 134: {\n key: 'LABELING_DURATION_MUST_DEFINE',\n severity: 'error',\n reason:\n \"You should define 'LabelingDuration' for this file. 'LabelingDuration' is the total duration of the labeling pulse train, in seconds, corresponding to the temporal width of the labeling bolus for `(P)CASL`. In case all control-label volumes (or deltam or CBF) have the same `LabelingDuration`, a scalar must be specified. In case the control-label volumes (or deltam or cbf) have a different `LabelingDuration`, an array of numbers must be specified, for which any `m0scan` in the timeseries has a `LabelingDuration` of zero. In case an array of numbers is provided, its length should be equal to the number of volumes specified in `*_aslcontext.tsv`. 
Corresponds to DICOM Tag 0018,9258 `ASL Pulse Train Duration`.\",\n },\n 135: {\n key: 'POST_LABELING_DELAY_MUST_DEFINE',\n severity: 'error',\n reason:\n \"You should define 'PostLabelingDelay' for this file. 'PostLabelingDelay' is the time, in seconds, after the end of the labeling (for (P)CASL) or middle of the labeling pulse (for PASL) until the middle of the excitation pulse applied to the imaging slab (for 3D acquisition) or first slice (for 2D acquisition). Can be a number (for a single-PLD time series) or an array of numbers (for multi-PLD and Look-Locker). In the latter case, the array of numbers contains the PLD of each volume (i.e. each 'control' and 'label') in the acquisition order. Any image within the time-series without a PLD (e.g. an 'm0scan') is indicated by a zero. Based on DICOM Tags 0018,9079 Inversion Times and 0018,0082 InversionTime.\",\n },\n 136: {\n key: 'BACKGROUND_SUPPRESSION_MUST_DEFINE',\n severity: 'error',\n reason:\n \"You should define 'BackgroundSuppression' for this file. 'BackGroundSuppression' is a boolean indicating if background suppression is used.\",\n },\n 137: {\n key: 'VASCULAR_CRUSHING_MUST_DEFINE',\n severity: 'warning',\n reason:\n \"It is recommended to define 'VascularCrushing' for this file. 'VascularCrushing' is a boolean value indicating if an ASL crusher method is used.\",\n },\n 138: {\n key: 'PULSE_SEQUENCE_DETAILS_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'PulseSequenceDetails' for this file. 'PulseSequenceDetails' is the information beyond pulse sequence type that identifies the specific pulse sequence used (for example, 'Standard Siemens Sequence distributed with the VB17 software', 'Siemens WIP ### version #.##', or 'Sequence written by X using a version compiled on MM/DD/YYYY').\",\n },\n 139: {\n key: 'BLACKLISTED_MODALITY',\n severity: 'error',\n reason:\n 'Found a modality that has been blacklisted through validator configuration.',\n },\n 140: {\n key: '140_EMPTY',\n severity: 'warning',\n reason: '',\n },\n 141: {\n key: '141_EMPTY',\n severity: 'warning',\n reason: '',\n },\n 142: {\n key: 'LABELING_SLAB_THICKNESS_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'LabelingSlabThickness' for this file. 'LabelingSlabThickness' is the thickness of the labeling slab in millimeters. For non-selective FAIR a zero is entered. Corresponds to DICOM Tag 0018,9254 ASL Slab Thickness.\",\n },\n 143: {\n key: 'ACQUISITION_VOXELSIZE_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'AcquisitionVoxelSize' for this file. 'AcquisitionVoxelSize' is an array of numbers with a length of 3, in millimeters. This parameter denotes the original acquisition voxel size, excluding any inter-slice gaps and before any interpolation or resampling within reconstruction or image processing. Any point spread function effects (e.g. due to T2-blurring) that would decrease the effective resolution are not considered here.\",\n },\n 144: {\n key: 'BACKGROUND_SUPPRESSION_PULSE_TIME_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'BackgroundSuppressionPulseTime' for this file, when the 'BackgroundSuppression' is set to true. 'BackGroundSuppressionPulseTime' is an array of numbers containing timing, in seconds, of the background suppression pulses with respect to the start of the labeling. 
In case of multi-PLD with different background suppression pulse times, only the pulse time of the first PLD should be defined.\",\n },\n 145: {\n key: 'VASCULAR_CRUCHING_VENC_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'VascularCrushingVENC' for this file, when the 'VascularCrushing' is set to true. 'VascularCrushingVENC' is the crusher gradient strength, in centimeters per second. Specify either one number for the total time-series, or provide an array of numbers, for example when using QUASAR, using the value zero to identify volumes for which 'VascularCrushing' was turned off. Corresponds to DICOM Tag 0018,925A ASL Crusher Flow Limit.\",\n },\n 147: {\n key: 'PASL_BOLUS_CUT_OFF_FLAG',\n severity: 'error',\n reason:\n \"You should define the 'BolusCutOffFlag' for this file. 'BolusCutOffFlag' is a boolean indicating if a bolus cut-off technique is used. Corresponds to DICOM Tag 0018,925C ASL Bolus Cut-off Flag.\",\n },\n 149: {\n key: 'PASL_BOLUS_CUT_OFF_DELAY_TIME',\n severity: 'error',\n reason:\n \"It is required to define 'BolusCutOffDelayTime' for this file, when 'BolusCutOffFlag' is set to true. 'BolusCutOffDelayTime' is the duration between the end of the labeling and the start of the bolus cut-off saturation pulse(s), in seconds. This can be a number or array of numbers, of which the values must be non-negative and monotonically increasing, depending on the number of bolus cut-off saturation pulses. For Q2TIPS, only the values for the first and last bolus cut-off saturation pulses are provided. Based on DICOM Tag 0018,925F ASL Bolus Cut-off Delay Time.\",\n },\n 150: {\n key: 'PASL_BOLUS_CUT_OFF_TECHNIQUE',\n severity: 'error',\n reason:\n \"It is required to define 'BolusCutOffTechnique' for this file, when 'BolusCutOffFlag' is set to true. 'BolusCutOffTechnique' is the name of the technique used (e.g. Q2TIPS, QUIPSS, QUIPSSII). Corresponds to DICOM Tag 0018,925E ASL Bolus Cut-off Technique.\",\n },\n 153: {\n key: 'M0Type_NOT_SET',\n severity: 'error',\n reason:\n \"You should define the 'M0Type' for this file. 'M0Type' describes the presence of M0 information, as either: “Separate” when a separate `*_m0scan.nii[.gz]` is present, “Included” when an m0scan volume is contained within the current ‘*_asl.nii[.gz]’, “Estimate” when a single whole-brain M0 value is provided, or “Absent” when no specific M0 information is present.\",\n },\n 154: {\n key: 'M0Type_SET_INCORRECTLY',\n severity: 'error',\n reason:\n \"M0Type was not defined correctly. If 'M0Type' is equal to included, the corresponding '*_aslcontext.tsv' should contain the 'm0scan' volume.\",\n },\n 155: {\n key: 'MRACQUISITIONTYPE_MUST_DEFINE',\n severity: 'error',\n reason:\n \"You should define 'MRAcquisitionType' for this file. 'MRAcquistionType' is the type of sequence readout with possible values: `2D` or `3D`. Corresponds to DICOM Tag 0018,0023 `MR Acquisition Type`.\",\n },\n 156: {\n key: 'ACQUISITION_VOXELSIZE_WRONG',\n severity: 'warning',\n reason:\n \"The 'AcquisitionVoxelSize' field length is not 3. 'AcquisitionVoxelSize' should be defined as an array of numbers with a length of 3, in millimeters. This parameter denotes the original acquisition voxel size, excluding any inter-slice gaps and before any interpolation or resampling within reconstruction or image processing. Any point spread function effects (e.g. 
due to T2-blurring) that would decrease the effective resolution are not considered here.\",\n },\n 157: {\n key: 'LABELLING_DURATION_LENGTH_NOT_MATCHING_NIFTI',\n severity: 'error',\n reason:\n \"The number of values for 'LabelingDuration' for this file does not match the 4th dimension of the NIfTI header. 'LabelingDuration' is the total duration of the labeling pulse train, in seconds, corresponding to the temporal width of the labeling bolus for `(P)CASL`. In case all control-label volumes (or deltam or CBF) have the same `LabelingDuration`, a scalar must be specified. In case the control-label volumes (or deltam or cbf) have a different `LabelingDuration`, an array of numbers must be specified, for which any `m0scan` in the timeseries has a `LabelingDuration` of zero. In case an array of numbers is provided, its length should be equal to the number of volumes specified in `*_aslcontext.tsv`. Corresponds to DICOM Tag 0018,9258 `ASL Pulse Train Duration`.\",\n },\n 164: {\n key: 'ASL_MANUFACTURER_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'Manufacturer' for this file. 'Manufacturer' is the manufacturer of the equipment that produced the composite instances. Corresponds to DICOM Tag 0008, 0070 Manufacturer\",\n },\n 165: {\n key: 'ASLCONTEXT_TSV_NOT_CONSISTENT',\n severity: 'error',\n reason:\n \"The number of volumes in the '*_aslcontext.tsv' for this file does not match the number of values in the NIfTI header.\",\n },\n 166: {\n key: 'LOOK_LOCKER_FLIP_ANGLE_MISSING',\n severity: 'error',\n reason:\n \"You should define 'FlipAngle' for this file, in case of a LookLocker acquisition. 'FlipAngle' is the flip angle (FA) for the acquisition, specified in degrees. Corresponds to: DICOM Tag 0018, 1314 `Flip Angle`. The data type number may apply to files from any MRI modality concerned with a single value for this field, or to the files in a file collection where the value of this field is iterated using the flip entity. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL or variable flip angle fMRI sequences.\",\n },\n 167: {\n key: 'FLIP_ANGLE_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'FlipAngle' for this file. 'FlipAngle' is the flip angle (FA) for the acquisition, specified in degrees. Corresponds to: DICOM Tag 0018, 1314 `Flip Angle`. The data type number may apply to files from any MRI modality concerned with a single value for this field, or to the files in a file collection where the value of this field is iterated using the flip entity. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL or variable flip angle fMRI sequences.\",\n },\n 168: {\n key: 'FLIP_ANGLE_NOT_MATCHING_NIFTI',\n severity: 'error',\n reason:\n \"The number of values for 'FlipAngle' for this file does not match the 4th dimension of the NIfTI header. 'FlipAngle' is the flip angle (FA) for the acquisition, specified in degrees. Corresponds to: DICOM Tag 0018, 1314 `Flip Angle`. The data type number may apply to files from any MRI modality concerned with a single value for this field, or to the files in a file collection where the value of this field is iterated using the flip entity. 
The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL or variable flip angle fMRI sequences.\",\n },\n 169: {\n key: 'LABELING_DURATION_PASL_INCONSISTENT',\n severity: 'error',\n reason:\n \"The 'LabelingDuration' for PASL 'ArterialSpinLabelingType' can be only a numerical value put to zero or unset. 'LabelingDuration' is the total duration of the labeling pulse train, in seconds, corresponding to the temporal width of the labeling bolus for `(P)CASL`. In case all control-label volumes (or deltam or CBF) have the same `LabelingDuration`, a scalar must be specified. In case the control-label volumes (or deltam or cbf) have a different `LabelingDuration`, an array of numbers must be specified, for which any `m0scan` in the timeseries has a `LabelingDuration` of zero. In case an array of numbers is provided, its length should be equal to the number of volumes specified in `*_aslcontext.tsv`. Corresponds to DICOM Tag 0018,9258 `ASL Pulse Train Duration`.\",\n },\n 170: {\n key: 'CONTINOUS_RECORDING_MISSING_JSON',\n severity: 'error',\n reason:\n 'Continous recording data files are required to have an associated JSON metadata file.',\n },\n 171: {\n key: 'VOLUME_TIMING_MISSING_ACQUISITION_DURATION',\n severity: 'error',\n reason:\n \"The field 'VolumeTiming' requires 'AcquisitionDuration' or 'SliceTiming' to be defined.\",\n },\n 172: {\n key: 'FLIP_ANGLE_NOT_MATCHING_ASLCONTEXT_TSV',\n severity: 'error',\n reason:\n \"The number of values for 'FlipAngle' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'. 'FlipAngle' is the flip angle (FA) for the acquisition, specified in degrees. Corresponds to: DICOM Tag 0018, 1314 `Flip Angle`. The data type number may apply to files from any MRI modality concerned with a single value for this field, or to the files in a file collection where the value of this field is iterated using the flip entity. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL or variable flip angle fMRI sequences.\",\n },\n 173: {\n key: 'POST_LABELING_DELAY_NOT_MATCHING_NIFTI',\n severity: 'error',\n reason:\n \"The number of values for 'PostLabelingDelay' for this file does not match the 4th dimension of the NIfTI header. 'PostLabelingDelay' is the time, in seconds, after the end of the labeling (for (P)CASL) or middle of the labeling pulse (for PASL) until the middle of the excitation pulse applied to the imaging slab (for 3D acquisition) or first slice (for 2D acquisition). Can be a number (for a single-PLD time series) or an array of numbers (for multi-PLD and Look-Locker). In the latter case, the array of numbers contains the PLD of each volume (i.e. each 'control' and 'label') in the acquisition order. Any image within the time-series without a PLD (e.g. an 'm0scan') is indicated by a zero. 
Based on DICOM Tags 0018,9079 Inversion Times and 0018,0082 InversionTime.\",\n },\n 174: {\n key: 'POST_LABELING_DELAY_NOT_MATCHING_ASLCONTEXT_TSV',\n severity: 'error',\n reason:\n \"'The number of values for PostLabelingDelay' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'.'PostLabelingDelay' is the time, in seconds, after the end of the labeling (for (P)CASL) or middle of the labeling pulse (for PASL) until the middle of the excitation pulse applied to the imaging slab (for 3D acquisition) or first slice (for 2D acquisition). Can be a number (for a single-PLD time series) or an array of numbers (for multi-PLD and Look-Locker). In the latter case, the array of numbers contains the PLD of each volume (i.e. each 'control' and 'label') in the acquisition order. Any image within the time-series without a PLD (e.g. an 'm0scan') is indicated by a zero. Based on DICOM Tags 0018,9079 Inversion Times and 0018,0082 InversionTime.\",\n },\n 175: {\n key: 'LABELLING_DURATION_NOT_MATCHING_ASLCONTEXT_TSV',\n severity: 'error',\n reason:\n \"The number of values for 'LabelingDuration' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'. 'LabelingDuration' is the total duration of the labeling pulse train, in seconds, corresponding to the temporal width of the labeling bolus for `(P)CASL`. In case all control-label volumes (or deltam or CBF) have the same `LabelingDuration`, a scalar must be specified. In case the control-label volumes (or deltam or cbf) have a different `LabelingDuration`, an array of numbers must be specified, for which any `m0scan` in the timeseries has a `LabelingDuration` of zero. In case an array of numbers is provided, its length should be equal to the number of volumes specified in `*_aslcontext.tsv`. Corresponds to DICOM Tag 0018,9258 `ASL Pulse Train Duration`.\",\n },\n 176: {\n key: 'ASLCONTEXT_TSV_INCONSISTENT',\n severity: 'error',\n reason:\n \"In the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv', the 'volume_type' can only be filled with volumes ['cbf' ,'m0scan', 'label', 'control', 'deltam'].\",\n },\n 177: {\n key: 'REPETITIONTIMEPREPARATION_NOT_MATCHING_ASLCONTEXT_TSV',\n severity: 'error',\n reason:\n \"The number of values of 'RepetitionTimePreparation' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'. 'RepetitionTimePreparation' is the interval, in seconds, that it takes a preparation pulse block to re-appear at the beginning of the succeeding (essentially identical) pulse sequence block. The data type number may apply to files from any MRI modality concerned with a single value for this field. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL.\",\n },\n 178: {\n key: 'VOLUME_TIMING_AND_REPETITION_TIME_MUTUALLY_EXCLUSIVE',\n severity: 'error',\n reason:\n \"The fields 'VolumeTiming' and 'RepetitionTime' for this file are mutually exclusive. 
Choose 'RepetitionTime' when the same repetition time is used for all volumes, or 'VolumeTiming' when variable times are used.\",\n },\n 179: {\n key: 'BACKGROUND_SUPPRESSION_PULSE_NUMBER_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'BackgroundSuppressionNumberPulses' for this file, in case 'BackgroundSuppression' is set to true. 'BackgroundSuppressionNumberPulses' is the number of background suppression pulses used. Note that this excludes any effect of background suppression pulses applied before the labeling.\",\n },\n 180: {\n key: 'BACKGROUND_SUPPRESSION_PULSE_NUMBER_NOT_CONSISTENT',\n severity: 'warning',\n reason:\n \"The 'BackgroundSuppressionNumberPulses' field is not consistent with the length of 'BackgroundSuppressionPulseTime'. 'BackgroundSuppressionNumberPulses' is the number of background suppression pulses used. Note that this excludes any effect of background suppression pulses applied before the labeling.\",\n },\n 181: {\n key: 'TOTAL_ACQUIRED_VOLUMES_NOT_CONSISTENT',\n severity: 'warning',\n reason:\n \"The number of values for 'TotalAcquiredVolumes' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'. 'TotalAcquiredVolumes' is the original number of 3D volumes acquired for each volume defined in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'.\",\n },\n 182: {\n key: 'MAGNETIC_FIELD_STRENGTH_MISSING',\n severity: 'error',\n reason:\n \"You should define 'MagneticFieldStrength' for this file. 'MagneticFieldStrength' is the nominal field strength of MR magnet in Tesla. Corresponds to DICOM Tag 0018,0087 'Magnetic Field Strength'.\",\n },\n 183: {\n key: 'SLICE_TIMING_NOT_DEFINED_2D_ASL',\n severity: 'error',\n reason:\n \"You should define 'SliceTiming', in case 'SequenceType' is set to a 2D sequence. 'SliceTiming' is the time at which each slice was acquired within each volume (frame) of the acquisition. Slice timing is not slice order -- rather, it is a list of times containing the time (in seconds) of each slice acquisition in relation to the beginning of volume acquisition. The list goes through the slices along the slice axis in the slice encoding dimension (see below). Note that to ensure the proper interpretation of the `SliceTiming` field, it is important to check if the OPTIONAL `SliceEncodingDirection` exists. In particular, if `SliceEncodingDirection` is negative, the entries in `SliceTiming` are defined in reverse order with respect to the slice axis, such that the final entry in the `SliceTiming` list is the time of acquisition of slice 0. Without this parameter slice time correction will not be possible. \",\n },\n 184: {\n key: 'POST_LABELING_DELAY_GREATER',\n severity: 'warning',\n reason:\n \"'PostLabelingDelay' is greater than 10, are you sure it's expressed in seconds? 'PostLabelingDelay' is the time, in seconds, after the end of the labeling (for (P)CASL) or middle of the labeling pulse (for PASL) until the middle of the excitation pulse applied to the imaging slab (for 3D acquisition) or first slice (for 2D acquisition). Can be a number (for a single-PLD time series) or an array of numbers (for multi-PLD and Look-Locker). In the latter case, the array of numbers contains the PLD of each volume (i.e. each 'control' and 'label') in the acquisition order. Any image within the time-series without a PLD (e.g. an 'm0scan') is indicated by a zero. 
Based on DICOM Tags 0018,9079 Inversion Times and 0018,0082 InversionTime.\",\n },\n 186: {\n key: 'BOLUS_CUT_OFF_DELAY_TIME_GREATER',\n severity: 'warning',\n reason:\n \"'BolusCutOffDelayTime' is greater than 10, are you sure it's expressed in seconds? 'BolusCutOffDelayTime' is duration between the end of the labeling and the start of the bolus cut-off saturation pulse(s), in seconds. This can be a number or array of numbers, of which the values must be non-negative and monotonically increasing, depending on the number of bolus cut-off saturation pulses. For Q2TIPS, only the values for the first and last bolus cut-off saturation pulses are provided. Based on DICOM Tag 0018,925F ASL Bolus Cut-off Delay Time.\",\n },\n 187: {\n key: 'LABELING_DURATION_GREATER',\n severity: 'warning',\n reason:\n \"'LabelingDuration' is greater than 10, are you sure it's expressed in seconds? 'LabelingDuration' is the total duration of the labeling pulse train, in seconds, corresponding to the temporal width of the labeling bolus for `(P)CASL`. In case all control-label volumes (or deltam or CBF) have the same `LabelingDuration`, a scalar must be specified. In case the control-label volumes (or deltam or cbf) have a different `LabelingDuration`, an array of numbers must be specified, for which any `m0scan` in the timeseries has a `LabelingDuration` of zero. In case an array of numbers is provided, its length should be equal to the number of volumes specified in `*_aslcontext.tsv`. Corresponds to DICOM Tag 0018,9258 `ASL Pulse Train Duration`.\",\n },\n 188: {\n key: 'VOLUME_TIMING_NOT_MONOTONICALLY_INCREASING',\n severity: 'error',\n reason:\n \"'VolumeTiming' is not monotonically increasing. 'VolumeTiming' is the time at which each volume was acquired during the acquisition, referring to the start of each readout in the ASL timeseries. Use this field instead of the 'RepetitionTime' field in the case that the ASL timeseries have a non-uniform time distance between acquired volumes. The list must have the same length as the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv', and the numbers must be non-negative and monotonically increasing. If 'VolumeTiming' is defined, this requires acquisition time (TA) to be defined via 'AcquisitionDuration'.\",\n },\n 189: {\n key: 'CASL_PCASL_NOT_ALLOWED_FIELDS',\n severity: 'error',\n reason:\n \"You defined one of the not allowed fields in case of CASL or PCASL 'ArterialSpinLabelingType'. Please verify which field among 'PASLType', 'LabelingSlabThickness' 'BolusCutOffFlag', 'BolusCutOffTimingSequence', 'BolusCutOffDelayTime' and 'BolusCutOffTechnique' you have filled.\",\n },\n 190: {\n key: 'PASL_NOT_ALLOWED_FIELDS',\n severity: 'error',\n reason:\n \"You defined one of the not allowed fields in case of PASL 'ArterialSpinLabelingType'. Please verify which field among 'CASLType', 'PCASLType' 'LabelingPulseAverageGradient', 'LabelingPulseMaximumGradient', 'LabelingPulseAverageB1', 'LabelingPulseDuration', 'LabelingPulseFlipAngle', 'LabelingPulseInterval', 'LabelingDuration' you have filled.\",\n },\n 191: {\n key: 'PCASL_CASL_LABELING_TYPE_NOT_ALLOWED',\n severity: 'error',\n reason:\n \"You defined either the 'CASLType' with a PCASL 'LabellingType' or the 'PCASLType' with a CASL 'LabellingType'. 
This is not allowed, please check that these fields are filled correctly.\",\n },\n 192: {\n key: 'BOLUS_CUT_OFF_DELAY_TIME_NOT_MONOTONICALLY_INCREASING',\n severity: 'error',\n reason:\n \"'BolusCutOffDelayTime' is not monotonically increasing. 'BolusCutOffDelayTime' is the duration between the end of the labeling and the start of the bolus cut-off saturation pulse(s), in seconds. This can be a number or array of numbers, of which the values must be non-negative and monotonically increasing, depending on the number of bolus cut-off saturation pulses. For Q2TIPS, only the values for the first and last bolus cut-off saturation pulses are provided. Based on DICOM Tag 0018,925F ASL Bolus Cut-off Delay Time.\",\n },\n 193: {\n key: 'ECHO_TIME_NOT_DEFINED',\n severity: 'error',\n reason:\n \"You must define 'EchoTime' for this file. 'EchoTime' is the echo time (TE) for the acquisition, specified in seconds. Corresponds to DICOM Tag 0018, 0081 Echo Time (please note that the DICOM term is in milliseconds not seconds). The data type number may apply to files from any MRI modality concerned with a single value for this field, or to the files in a file collection where the value of this field is iterated using the echo entity. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL or variable echo time fMRI sequences.\",\n },\n 194: {\n key: 'MRACQUISITIONTYPE_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'MRAcquisitionType' for this file. 'MRAcquisitionType' is the type of sequence readout with possible values: `2D` or `3D`. Corresponds to DICOM Tag 0018,0023 `MR Acquisition Type`.\",\n },\n 195: {\n key: 'M0ESTIMATE_NOT_DEFINED',\n severity: 'error',\n reason:\n \"You must define 'M0Estimate' for this file, in case 'M0Type' is defined as 'Estimate'. 'M0Estimate' is a single numerical whole-brain M0 value (referring to the M0 of blood), only if obtained externally (for example retrieved from CSF in a separate measurement).\",\n },\n 196: {\n key: 'ECHO_TIME_NOT_CONSISTENT',\n severity: 'warning',\n reason:\n \"The number of values for 'EchoTime' for this file does not match the number of volumes in the 'sub-<label>[_ses-<label>][_acq-<label>][_rec-<label>][_run-<index>]_aslcontext.tsv'. 'EchoTime' is the echo time (TE) for the acquisition, specified in seconds. \",\n },\n 197: {\n key: 'ECHO_TIME_ELEMENTS',\n severity: 'warning',\n reason:\n \"The number of elements in the 'EchoTime' array should match the 'k' dimension of the corresponding NIfTI volume.\",\n },\n 198: {\n key: 'M0Type_SET_INCORRECTLY_TO_ABSENT',\n severity: 'error',\n reason:\n \"You defined M0Type as 'absent' while including a separate '*_m0scan.nii[.gz]' and '*_m0scan.json', or defining the 'M0Estimate' field. This is not allowed, please check that this field is filled correctly.\",\n },\n 199: {\n key: 'M0Type_SET_INCORRECTLY_TO_ABSENT_IN_ASLCONTEXT',\n severity: 'error',\n reason:\n \"You defined M0Type as 'absent' while including an m0scan volume within the '*_aslcontext.tsv'. This is not allowed, please check that this field is filled correctly.\",\n },\n 200: {\n key: 'REPETITION_TIME_PREPARATION_MISSING',\n severity: 'error',\n reason:\n \"You must define 'RepetitionTimePreparation' for this file. 
'RepetitionTimePreparation' is the interval, in seconds, that it takes a preparation pulse block to re-appear at the beginning of the succeeding (essentially identical) pulse sequence block. The data type number may apply to files from any MRI modality concerned with a single value for this field. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL.\",\n },\n 201: {\n key: 'REPETITIONTIME_PREPARATION_NOT_CONSISTENT',\n severity: 'error',\n reason:\n \"The number of values for 'RepetitionTimePreparation' for this file does not match the 4th dimension of the NIfTI header. 'RepetitionTimePreparation' is the interval, in seconds, that it takes a preparation pulse block to re-appear at the beginning of the succeeding (essentially identical) pulse sequence block. The data type number may apply to files from any MRI modality concerned with a single value for this field. The data type array provides a value for each volume in a 4D dataset and should only be used when the volume timing is critical for interpretation of the data, such as in ASL.\",\n },\n 202: {\n key: 'M0Type_SET_INCORRECTLY',\n severity: 'error',\n reason:\n \"M0Type was not defined correctly. If 'M0Type' is equal to separate, the dataset should include a *_m0scan.nii[.gz] and *_m0scan.json file.\",\n },\n 211: {\n key: 'TSV_MISSING_REQUIRED_COLUMN',\n severity: 'error',\n reason:\n 'A TSV file is missing a column required by a value in its JSON sidecar.',\n },\n 212: {\n key: 'PARTICIPANT_ID_PATTERN',\n severity: 'error',\n reason:\n 'Participant_id column labels must consist of the pattern \"sub-<subject_id>\".',\n },\n 213: {\n key: 'README_FILE_SMALL',\n severity: 'warning',\n reason:\n 'The recommended file /README is very small. Please consider expanding it with additional information about the dataset.',\n },\n 214: {\n key: 'SAMPLES_TSV_MISSING',\n severity: 'error',\n reason:\n 'The compulsory file /samples.tsv is missing. 
See Section 03 (Modality agnostic files) of the BIDS specification.',\n },\n 215: {\n key: 'SAMPLE_ID_PATTERN',\n severity: 'error',\n reason:\n 'sample_id column labels must consist of the pattern \"sample-<sample_id>\".',\n },\n 216: {\n key: 'SAMPLE_ID_COLUMN',\n severity: 'error',\n reason: \"Samples .tsv files must have a 'sample_id' column.\",\n },\n 217: {\n key: 'PARTICIPANT_ID_COLUMN',\n severity: 'error',\n reason: \"Samples .tsv files must have a 'participant_id' column.\",\n },\n 218: {\n key: 'SAMPLE_TYPE_COLUMN',\n severity: 'error',\n reason: \"Samples .tsv files must have a 'sample_type' column.\",\n },\n 219: {\n key: 'SAMPLE_TYPE_VALUE',\n severity: 'error',\n reason:\n 'sample_type MUST consist of one of the following values: cell line, in vitro differentiated cells, primary cell, cell-free sample, cloning host, tissue, whole organisms, organoid or technical sample.',\n },\n 220: {\n key: 'SAMPLE_ID_DUPLICATE',\n severity: 'error',\n reason:\n 'Each sample from the same subject MUST be described by one and only one row.',\n },\n 221: {\n key: 'PIXEL_SIZE_INCONSISTENT',\n severity: 'error',\n reason:\n 'PixelSize needs to be consistent with the PhysicalSizeX, PhysicalSizeY and PhysicalSizeZ OME metadata fields',\n },\n 222: {\n key: 'INVALID_PIXEL_SIZE_UNIT',\n severity: 'warning',\n reason: 'PixelSize consistency is only validated for \"mm\", \"µm\" and \"nm\".',\n },\n 223: {\n key: 'CHUNK_TRANSFORMATION_MATRIX_MISSING',\n severity: 'warning',\n reason:\n \"It is recommended to define 'ChunkTransformationMatrix' for this file.\",\n },\n 224: {\n key: 'OPTIONAL_FIELD_INCONSISTENT',\n severity: 'error',\n reason: 'Optional JSON field is not consistent with the OME-TIFF metadata',\n },\n 225: {\n key: 'NO_VALID_JSON',\n severity: 'error',\n reason: 'No valid JSON file found for this file',\n },\n 226: {\n key: 'UNSUPPORTED_BIG_TIFF',\n severity: 'warning',\n reason: 'Metadata consistency check skipped for BigTiff OME-TIFF file',\n },\n 227: {\n key: 'INCONSISTENT_TIFF_EXTENSION',\n severity: 'error',\n reason: 'Inconsistent TIFF file type and extension',\n },\n 228: {\n key: 'MULTIPLE_README_FILES',\n severity: 'error',\n reason:\n 'A BIDS dataset MUST NOT contain more than one `README` file (with or without extension) at its root directory.',\n },\n 229: {\n key: 'INCORRECT_ORDER_TSV_COLUMN_CHANNELS_IEEG',\n severity: 'error',\n reason:\n \"The column names of the IEEG channels file must be in the following order ['name', 'type', 'units', 'low_cutoff', 'high_cutoff']\",\n },\n 230: {\n key: 'INCORRECT_ORDER_TSV_COLUMN_CHANNELS_EEG',\n severity: 'error',\n reason:\n \"The column names of the EEG channels file must be in the following order ['name', 'type', 'units']\",\n },\n 231: {\n key: 'TSV_COLUMN_HEADER_DUPLICATE',\n severity: 'error',\n reason:\n 'Two elements in the first row of a TSV are the same. Each column header must be unique.',\n },\n 232: {\n key: 'TSV_COLUMN_HEADER_NA',\n severity: 'error',\n reason:\n 'An element in a tsv header is \"n/a\". 
A different header name should be chosen.',\n },\n 233: {\n key: 'MISSING_TSV_COLUMN_NIRS_OPTODES',\n severity: 'error',\n reason:\n \"The column names of the optodes file must begin with ['name', 'type', 'x', 'y', 'z']\",\n },\n 234: {\n key: 'MISSING_TSV_COLUMN_NIRS_CHANNELS',\n severity: 'error',\n reason:\n \"The column names of the channels file must begin with ['name', 'type', 'source', 'detector', 'wavelength_nominal', 'units']\",\n },\n 235: {\n key: 'MOTION_COMPONENT_IN_WRONG_COLUMN',\n severity: 'error',\n reason:\n \"The 'component' column must be the second column in the channels.tsv file.\",\n },\n 236: {\n key: 'MOTION_COMPONENT_INVLAID_VALUE',\n severity: 'error',\n reason:\n \"Values in the 'component' column must be one of ['x', 'y', 'z', 'quat_x', 'quat_y', 'quat_z', 'quat_w', 'n/a'].\",\n },\n}\n" }, { "alpha_fraction": 0.5465413928031921, "alphanum_fraction": 0.5661827325820923, "avg_line_length": 29.8157901763916, "blob_id": "77d0cbb3b0c9a97cdbfd8a1805e745173466c352", "content_id": "1de4402e6f500b545925ae48d92fd2cc47308033", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1171, "license_type": "permissive", "max_line_length": 100, "num_lines": 38, "path": "/bids-validator/utils/__tests__/type.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import type from '../type.js'\n\ndescribe('type.js', () => {\n describe('isBids()', () => {\n it('does not throw an error for valid defacemask filenames', () => {\n expect(\n type.isBIDS(\n '/sub-rid000043/anat/sub-rid000043_run-02_mod-T1w_defacemask.nii.gz',\n ),\n ).toBe(true)\n })\n\n it('does not throw an error for recording entity in physio data', () => {\n expect(\n type.isBIDS(\n '/sub-05/eeg/sub-05_task-matchingpennies_recording-eyetracking_physio.tsv.gz',\n ),\n ).toBe(true)\n })\n\n it('does not throw an error for recording entity in physio data at root of the dataset', () => {\n expect(\n type.isBIDS('/task-matchingpennies_recording-eyetracking_physio.json'),\n ).toBe(true)\n })\n\n const physio_task_modalities = ['eeg', 'ieeg', 'meg', 'func', 'beh']\n physio_task_modalities.map((mod) => {\n it(`does not throw an error for recording entity in ${mod} physio data`, () => {\n expect(\n type.isBIDS(\n `/sub-05/${mod}/sub-05_task-matchingpennies_recording-eyetracking_physio.tsv.gz`,\n ),\n ).toBe(true)\n })\n })\n })\n})\n" }, { "alpha_fraction": 0.48266100883483887, "alphanum_fraction": 0.4953998625278473, "avg_line_length": 26.98019790649414, "blob_id": "8884485c8695d2338649fe58eec472bef5c9a7e7", "content_id": "e443eb1c9ed0ee3547c99dfdca38e0440c8ae7bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2826, "license_type": "permissive", "max_line_length": 97, "num_lines": 101, "path": "/bids-validator/tests/consoleFormat.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport Issue from '../utils/issues'\nimport consoleFormat from '../utils/consoleFormat'\n\ndescribe('console format', () => {\n let issues\n beforeEach(() => {\n issues = {\n errors: [\n {\n key: 'TEST_ERROR',\n severity: 'error',\n reason: 'testing consoleFormat',\n files: [\n new Issue({\n key: 'TEST_ERROR',\n file: '/nonexistent/test/file.wut',\n code: 0,\n evidence: 'none',\n line: -1,\n character: -1,\n severity: 'error',\n reason: 'testing consoleFormat',\n }),\n ],\n additionalFileCount: 0,\n },\n ],\n warnings: 
[\n {\n key: 'TEST_WARNING',\n severity: 'warning',\n reason: 'testing consoleFormat',\n files: [\n new Issue({\n key: 'TEST_WARNING',\n file: '/nonexistent/test/file.wut',\n code: 2,\n evidence: 'none',\n line: -1,\n character: -1,\n severity: 'warning',\n reason: 'testing consoleFormat',\n }),\n ],\n additionalFileCount: 0,\n },\n ],\n summary: {\n sessions: [],\n subjects: [],\n tasks: [],\n modalities: [],\n totalFiles: 0,\n size: 0,\n },\n }\n })\n\n describe('logIssues', () => {\n it('takes an array of errors and returns them formatted as an array', () => {\n const output = consoleFormat.logIssues(issues.errors, 'red', {\n verbose: true,\n })\n assert(Array.isArray(output))\n assert.deepEqual(output, [\n '\\t\\u001b[31m1: [ERR] testing consoleFormat (code: undefined - TEST_ERROR)\\u001b[39m',\n '\\t\\ttesting consoleFormat',\n '\\t\\t@ line: -1 character: -1',\n '\\t\\tEvidence: none',\n '',\n ])\n })\n it('takes an array of warnings and returns them formatted as an array', () => {\n const output = consoleFormat.logIssues(issues.warnings, 'yellow', {\n verbose: true,\n })\n assert.deepEqual(output, [\n '\\t\\u001b[33m1: [WARN] testing consoleFormat (code: undefined - TEST_WARNING)\\u001b[39m',\n '\\t\\ttesting consoleFormat',\n '\\t\\t@ line: -1 character: -1',\n '\\t\\tEvidence: none',\n '',\n ])\n })\n })\n\n describe('issues', () => {\n it('formats issues as a string a given issues object', () => {\n const output = consoleFormat.issues(issues, {})\n assert.equal(typeof output, 'string')\n })\n })\n\n describe('summary', () => {\n it('formats summary as a string a given issues object', () => {\n const output = consoleFormat.summary(issues.summary, {})\n assert.equal(typeof output, 'string')\n })\n })\n})\n" }, { "alpha_fraction": 0.5389490723609924, "alphanum_fraction": 0.5467184782028198, "avg_line_length": 26.32402229309082, "blob_id": "0576dc7fda73567d025c9b22d8d584df13e9b070", "content_id": "cccd9e23fa6c11b8016ea50258a8ad6665f9cc99", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4891, "license_type": "permissive", "max_line_length": 226, "num_lines": 179, "path": "/bids-validator/utils/consoleFormat.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import colors from 'colors/safe'\nimport { table, getBorderCharacters } from 'table'\nimport pluralize from 'pluralize'\nimport bytes from 'bytes'\n\nexport default {\n issues: formatIssues,\n summary: formatSummary,\n logIssues,\n unexpectedError,\n}\n\nfunction unexpectedError(message) {\n return colors.red(message)\n}\n\nfunction formatIssues(issues, options = {}) {\n var errors = issues.errors\n var warnings = issues.warnings\n var output = []\n if (errors && errors.length === 1 && errors[0].code === '61') {\n output.push(\n colors.red(\n '[ERR] The given directory failed an initial Quick Test. This means the basic names and structure of the files and directories do not comply with BIDS specification. 
For more info go to https://bids.neuroimaging.io/',\n ),\n )\n } else if (issues.config && issues.config.length >= 1) {\n output.push(colors.red('[ERR] Invalid Config File'))\n for (var i = 0; i < issues.config.length; i++) {\n var issue = issues.config[i]\n issue.file.file = { relativePath: issue.file.path }\n issue.files = [issue.file]\n }\n output = output.concat(logIssues(issues.config, 'red', options))\n } else if (errors.length >= 1 || warnings.length >= 1) {\n output = output.concat(logIssues(errors, 'red', options))\n output = output.concat(logIssues(warnings, 'yellow', options))\n } else {\n output.push(colors.green('This dataset appears to be BIDS compatible.'))\n }\n return output.join('\\n')\n}\n\nfunction logIssues(issues, color, options) {\n const severity = color == 'red' ? 'ERR' : 'WARN'\n const output = []\n for (var i = 0; i < issues.length; i++) {\n const issue = issues[i]\n const issueNumber = i + 1\n output.push(\n '\\t' +\n colors[color](\n issueNumber +\n ': ' +\n `[${severity}] ` +\n issue.reason +\n ' (code: ' +\n issue.code +\n ' - ' +\n issue.key +\n ')',\n ),\n )\n for (var j = 0; j < issue.files.length; j++) {\n var file = issues[i].files[j]\n if (!file || !file.file) {\n continue\n }\n let indent = '\\t\\t'\n if (file.file.relativePath) {\n output.push(`${indent}.` + file.file.relativePath)\n indent = '\\t\\t\\t'\n }\n if (options.verbose) {\n output.push(indent + file.reason)\n }\n if (file.line) {\n var msg = `${indent}@ line: ` + file.line\n if (file.character) {\n msg += ' character: ' + file.character\n }\n output.push(msg)\n }\n if (file.evidence) {\n output.push(`${indent}Evidence: ` + file.evidence)\n }\n }\n if (issue.additionalFileCount > 0) {\n output.push(\n '\\t\\t' +\n colors[color](\n '... and ' +\n issue.additionalFileCount +\n ' more files having this issue (Use --verbose to see them all).',\n ),\n )\n }\n output.push('')\n if (issue.helpUrl) {\n output.push(\n colors.cyan(\n '\\t' +\n 'Please visit ' +\n issue.helpUrl +\n ' for existing conversations about this issue.',\n ),\n )\n output.push('')\n }\n }\n return output\n}\n\nfunction formatSummary(summary) {\n const output = []\n if (summary) {\n var numSessions = summary.sessions.length > 0 ? summary.sessions.length : 1\n\n // data\n var column1 = [\n summary.totalFiles +\n ' ' +\n pluralize('File', summary.totalFiles) +\n ', ' +\n bytes(summary.size),\n summary.subjects.length +\n ' - ' +\n pluralize('Subject', summary.subjects.length),\n numSessions + ' - ' + pluralize('Session', numSessions),\n ],\n column2 = summary.tasks,\n column3 = summary.modalities\n\n var longestColumn = Math.max(column1.length, column2.length, column3.length)\n var pad = ' '\n\n // headers\n var headers = [\n pad,\n colors.blue.underline('Summary:') + pad,\n colors.blue.underline('Available Tasks:') + pad,\n colors.blue.underline('Available Modalities:'),\n ]\n\n // rows\n var rows = [headers]\n for (var i = 0; i < longestColumn; i++) {\n var val1, val2, val3\n val1 = column1[i] ? column1[i] + pad : ''\n val2 = column2[i] ? column2[i] + pad : ''\n val3 = column3[i] ? 
column3[i] : ''\n rows.push([pad, val1, val2, val3])\n }\n output.push(\n table(rows, {\n border: getBorderCharacters(`void`),\n columnDefault: {\n paddingLeft: 0,\n paddingRight: 1,\n },\n drawHorizontalLine: () => {\n return false\n },\n }),\n )\n\n output.push('')\n\n //Neurostars message\n output.push(\n colors.cyan(\n '\\tIf you have any questions, please post on https://neurostars.org/tags/bids.',\n ),\n )\n\n output.push('')\n }\n return output.join('\\n')\n}\n" }, { "alpha_fraction": 0.6330859065055847, "alphanum_fraction": 0.6373276710510254, "avg_line_length": 33.925926208496094, "blob_id": "6856949ca1ac7ba17c82b3d3a5ebc7befc8a9687", "content_id": "e303991711d45885fd5fdae583d511a0e71aa9b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2829, "license_type": "permissive", "max_line_length": 107, "num_lines": 81, "path": "/bids-validator/validators/options.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// ESM import for yargs does not work for mysterious reasons\nconst yargs = require('yargs')\n\nexport function parseOptions(argumentOverride) {\n return yargs(argumentOverride)\n .usage('Usage: $0 <dataset_directory> [options]')\n .help('help')\n .alias('help', 'h')\n .version(require('../package.json').version)\n .alias('version', 'v')\n .demand(1, 1)\n .boolean('ignoreWarnings')\n .describe('ignoreWarnings', 'Disregard non-critical issues')\n .boolean('ignoreNiftiHeaders')\n .describe(\n 'ignoreNiftiHeaders',\n 'Disregard NIfTI header content during validation',\n )\n .boolean('ignoreSubjectConsistency')\n .describe(\n 'ignoreSubjectConsistency',\n 'Skip checking that any given file for one subject is present for all other subjects.',\n )\n .option('blacklistModalities', {\n default: [],\n describe: 'Array of modalities to error on if detected.',\n array: true,\n choices: ['MRI', 'PET', 'MEG', 'EEG', 'iEEG', 'Microscopy', 'NIRS'],\n })\n .boolean('verbose')\n .describe('verbose', 'Log more extensive information about issues')\n .boolean('json')\n .describe('json', 'Output results as JSON')\n .boolean('no-color')\n .describe('no-color', 'Disable colors in output text.')\n .default('no-color', false)\n .boolean('ignoreSymlinks')\n .describe(\n 'ignoreSymlinks',\n 'Skip any symlinked directories when validating a dataset',\n )\n .boolean('remoteFiles')\n .describe('remoteFiles', 'Validate remote files.')\n .default('remoteFiles', false)\n .boolean('gitTreeMode')\n .describe(\n 'gitTreeMode',\n 'Improve performance using git metadata. Does not capture changes not known to git.',\n )\n .option('gitRef', {\n describe:\n 'Targets files at a given branch, tag, or commit hash. Use with --gitTreeMode. [default: \"HEAD\"]',\n type: 'string',\n })\n .implies('gitRef', 'gitTreeMode')\n .option('config', {\n alias: 'c',\n describe:\n 'Optional configuration file. See https://github.com/bids-standard/bids-validator for more info',\n default: '.bids-validator-config.json',\n })\n .boolean('filenames')\n .default('filenames', false)\n .describe(\n 'filenames',\n 'A less accurate check that reads filenames one per line from stdin.',\n )\n .hide('filenames')\n .option('schema', {\n alias: 's',\n describe:\n 'BIDS specification schema version to use for validation, e.g. 
\"v1.6.0\" (beta)',\n default: 'disable',\n choices: ['disable', 'v1.6.0', 'v1.7.0', 'master'],\n })\n .epilogue(\n 'This tool checks if a dataset in a given directory is \\\ncompatible with the Brain Imaging Data Structure specification. To learn \\\nmore about Brain Imaging Data Structure visit http://bids.neuroimaging.io',\n ).argv\n}\n" }, { "alpha_fraction": 0.6790928840637207, "alphanum_fraction": 0.6790928840637207, "avg_line_length": 29.951923370361328, "blob_id": "b3853846fa723b7ed393ce9a8766f3f713509cbe", "content_id": "03870cce8e30dc8c7f6764a711b783d31229cec5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3219, "license_type": "permissive", "max_line_length": 81, "num_lines": 104, "path": "/bids-validator/src/validators/bids.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { CheckFunction } from '../types/check.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { GenericSchema } from '../types/schema.ts'\nimport { ValidationResult } from '../types/validation-result.ts'\nimport { applyRules } from '../schema/applyRules.ts'\nimport { walkFileTree } from '../schema/walk.ts'\nimport { loadSchema } from '../setup/loadSchema.ts'\nimport { ValidatorOptions } from '../setup/options.ts'\nimport { Summary } from '../summary/summary.ts'\nimport { filenameIdentify } from './filenameIdentify.ts'\nimport { filenameValidate } from './filenameValidate.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { emptyFile } from './internal/emptyFile.ts'\nimport { BIDSContext, BIDSContextDataset } from '../schema/context.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { parseOptions } from '../setup/options.ts'\n\n/**\n * Ordering of checks to apply\n */\nconst CHECKS: CheckFunction[] = [\n emptyFile,\n filenameIdentify,\n filenameValidate,\n applyRules,\n]\n\n/**\n * Full BIDS schema validation entrypoint\n */\nexport async function validate(\n fileTree: FileTree,\n options: ValidatorOptions,\n): Promise<ValidationResult> {\n const issues = new DatasetIssues()\n const summary = new Summary()\n const schema = await loadSchema(options.schema)\n summary.schemaVersion = schema.schema_version\n\n /* There should be a dataset_description in root, this will tell us if we\n * are dealing with a derivative dataset\n */\n const ddFile = fileTree.files.find(\n (file: BIDSFile) => file.name === 'dataset_description.json',\n )\n\n let dsContext\n if (ddFile) {\n const description = await ddFile.text().then((text) => JSON.parse(text))\n summary.dataProcessed = description.DatasetType === 'derivative'\n dsContext = new BIDSContextDataset(options, description)\n } else {\n dsContext = new BIDSContextDataset(options)\n }\n\n let derivatives: FileTree[] = []\n fileTree.directories = fileTree.directories.filter((dir) => {\n if (dir.name === 'derivatives') {\n dir.directories.map((deriv) => {\n if (\n deriv.files.some(\n (file: BIDSFile) => file.name === 'dataset_description.json',\n )\n ) {\n derivatives.push(deriv)\n }\n })\n return false\n }\n return true\n })\n\n for await (const context of walkFileTree(fileTree, issues, dsContext)) {\n // TODO - Skip ignored files for now (some tests may reference ignored files)\n if (context.file.ignored) {\n continue\n }\n await context.asyncLoads()\n // Run majority of checks\n for (const check of CHECKS) {\n // TODO - Resolve this double casting?\n await check(schema as unknown as GenericSchema, context)\n }\n await 
summary.update(context)\n }\n\n let derivativesSummary: Record<string, ValidationResult> = {}\n await Promise.allSettled(\n derivatives.map(async (deriv) => {\n derivativesSummary[deriv.name] = await validate(deriv, options)\n return derivativesSummary[deriv.name]\n }),\n )\n\n let output: ValidationResult = {\n issues,\n summary: summary.formatOutput(),\n }\n\n if (Object.keys(derivativesSummary).length) {\n output['derivativesSummary'] = derivativesSummary\n }\n return output\n}\n" }, { "alpha_fraction": 0.3778519630432129, "alphanum_fraction": 0.43266555666923523, "avg_line_length": 24.13286781311035, "blob_id": "0a89616abe1a2df7dfc1652d61eb243c8d29b147", "content_id": "33b16d6b7a2c0822e2d2290f61fc877df87b4c12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3594, "license_type": "permissive", "max_line_length": 107, "num_lines": 143, "path": "/bids-validator/tests/headerField.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport headerFields from '../validators/headerFields'\n\ndescribe('headerFields', () => {\n it('should throw an error if _magnitude1 or _magnitude2 files do not have exactly 3 dimensions.', () => {\n const headers = [\n // each of these headers has one too many dimensions on the 'dim' field.\n [\n {\n name: 'sub-01_magnitude1.nii',\n relativePath: 'sub-01_magnitude1.nii',\n },\n {\n dim: [5, 1, 1, 1, 1],\n pixdim: [5, 1, 1, 1, 1],\n xyzt_units: [5, 1, 1, 1, 1],\n },\n ],\n [\n {\n name: 'sub-01_magnitude2.nii',\n relativePath: 'sub-01_magnitude2.nii',\n },\n {\n dim: [5, 1, 1, 1, 1],\n pixdim: [5, 1, 1, 1, 1],\n xyzt_units: [5, 1, 1, 1, 1],\n },\n ],\n // each of these headers has one too few dimensions on the 'dim' field.\n [\n {\n name: 'sub-02_magnitude1.nii',\n relativePath: 'sub-02_magnitude1.nii',\n },\n {\n dim: [3, 1, 1],\n pixdim: [4, 1, 1, 1],\n xyzt_units: [4, 1, 1, 1],\n },\n ],\n [\n {\n name: 'sub-02_magnitude2.nii',\n relativePath: 'sub-02_magnitude2.nii',\n },\n {\n dim: [3, 1, 1],\n pixdim: [4, 1, 1, 1],\n xyzt_units: [4, 1, 1, 1],\n },\n ],\n ]\n const issues = headerFields(headers)\n assert(\n issues.length == 4 &&\n issues[0].code == '94' &&\n issues[1].code == '94' &&\n issues[2].code == '94' &&\n issues[3].code == '94',\n )\n })\n\n it('_magnitude1 or _magnitude2 files should have 3 dimensions.', () => {\n const headers = [\n [\n {\n name: 'sub-01_magnitude1.nii',\n relativePath: 'sub-01_magnitude1.nii',\n },\n {\n dim: [3, 1, 1, 1],\n pixdim: [3, 1, 1, 1],\n xyzt_units: [3, 1, 1, 1],\n },\n ],\n [\n {\n name: 'sub-01_magnitude2.nii',\n relativePath: 'sub-01_magnitude2.nii',\n },\n {\n dim: [3, 1, 1, 1],\n pixdim: [3, 1, 1, 1],\n xyzt_units: [3, 1, 1, 1],\n },\n ],\n ]\n const issues = headerFields(headers)\n assert.deepEqual(issues, [])\n })\n\n it('should throw an error if _T1w files has the wrong dimensions.', () => {\n // each of these headers has one too many dimensions on the 'dim' field.\n // the first entry is the total count, and the following three entries are spatial.\n const headers = [\n [\n {\n name: 'sub-01_T1w.nii',\n relativePath: 'sub-01_T1w.nii',\n },\n {\n dim: [5, 1, 1, 1, 1],\n pixdim: [5, 1, 1, 1, 1],\n xyzt_units: [5, 1, 1, 1, 1],\n },\n ],\n [\n {\n name: 'sub-02_T1w.nii',\n relativePath: 'sub-02_T1w.nii',\n },\n {\n dim: [3, 1, 1],\n pixdim: [4, 1, 1, 1],\n xyzt_units: [4, 1, 1, 1],\n },\n ],\n ]\n const issues = headerFields(headers)\n assert(\n issues.length == 2 && issues[0].code 
== '95' && issues[1].code == '95',\n )\n })\n\n it('_T1w files should have exactly 3 dimensions.', () => {\n const headers = [\n [\n {\n name: 'sub-01_T1w.nii',\n relativePath: 'sub-01_T1w.nii',\n },\n {\n dim: [3, 1, 1, 1],\n pixdim: [3, 1, 1, 1],\n xyzt_units: [4, 1, 1, 1],\n },\n ],\n ]\n const issues = headerFields(headers)\n assert.deepEqual(issues, [])\n })\n})\n" }, { "alpha_fraction": 0.6334951519966125, "alphanum_fraction": 0.6407766938209534, "avg_line_length": 23.235294342041016, "blob_id": "dfc931f2487735b81757752ec870bac580e13b9e", "content_id": "4871bff960ce01f14e174882bca5ba68615450fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 412, "license_type": "permissive", "max_line_length": 51, "num_lines": 17, "path": "/bids-validator/utils/files/readOMEFile.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import ExifReader from 'exifreader'\nconst xml2js = require('xml2js')\n\nconst readOMEFile = (buffer) => {\n let tags = ExifReader.load(buffer)\n let xml = tags['ImageDescription']['description']\n return new Promise((resolve, reject) => {\n xml2js\n .parseStringPromise(xml)\n .then((result) => {\n resolve(result)\n })\n .catch((error) => reject(error))\n })\n}\n\nexport default readOMEFile\n" }, { "alpha_fraction": 0.5303825736045837, "alphanum_fraction": 0.5337584614753723, "avg_line_length": 24.883495330810547, "blob_id": "bdde5f6868929caa82015b9eb06abf360155e733", "content_id": "7b11c3f403b3e387a603cd06e824815be5f18efd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2666, "license_type": "permissive", "max_line_length": 174, "num_lines": 103, "path": "/bids-validator/tests/utils/issues.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport utils from '../../utils'\n\ndescribe('issues', () => {\n describe('exceptionHandler', () => {\n let testErr, issueList, summary, options, formattedIssues\n\n beforeAll(() => {\n testErr = new Error('oh no')\n issueList = []\n summary = {\n sessions: [],\n subjects: [],\n tasks: [],\n modalities: [],\n totalFiles: 0,\n size: 0,\n }\n options = {\n ignoreWarnings: false,\n ignoreNiftiHeaders: false,\n verbose: false,\n config: {},\n }\n formattedIssues = utils.issues.exceptionHandler(\n testErr,\n issueList,\n summary,\n options,\n )\n })\n\n it('adds INTERNAL ERROR to the issues.errors list', () => {\n assert.equal(formattedIssues.errors[0].key, 'INTERNAL ERROR')\n })\n\n it(\"creates a properly formatted issue in the error's files property\", () => {\n const exceptionIssue = formattedIssues.errors[0].files[0]\n assert.ok(utils.issues.isAnIssue(exceptionIssue))\n })\n\n it('gives a reason for the error', () => {\n const exceptionIssue = formattedIssues.errors[0].files[0]\n assert.equal(\n exceptionIssue.reason,\n `${testErr.message}; please help the BIDS team and community by opening an issue at (https://github.com/bids-standard/bids-validator/issues) with the evidence here.`,\n )\n })\n })\n\n describe('exception/issue redirect', () => {\n let promise, innerPromise, validIssue, invalidIssue\n beforeAll(() => {\n promise = null\n validIssue = new utils.issues.Issue({\n code: 12,\n file: 'goodstuff.json',\n reason: 'a series of unfortunate events',\n })\n invalidIssue = new Error('oops')\n\n promise = () => {\n return new Promise((resolve, reject) => {\n innerPromise().catch((err) =>\n 
utils.issues.redirect(err, reject, () => {\n resolve()\n }),\n )\n })\n }\n })\n\n it('resolves with valid issue', (done) => {\n innerPromise = () =>\n new Promise((_, reject) => {\n reject(validIssue)\n })\n\n promise().then(() => done())\n })\n\n it('rejects exceptions', (done) => {\n innerPromise = () =>\n new Promise((_, reject) => {\n reject(invalidIssue)\n })\n\n promise().catch(() => done())\n })\n\n it('passes the exception through the error', (done) => {\n innerPromise = () =>\n new Promise((_, reject) => {\n reject(invalidIssue)\n })\n\n promise().catch((err) => {\n assert.deepEqual(err, invalidIssue)\n done()\n })\n })\n })\n})\n" }, { "alpha_fraction": 0.6141414046287537, "alphanum_fraction": 0.615007221698761, "avg_line_length": 22.571428298950195, "blob_id": "7884649af24e66c851a5662986d0e54a4bda6eb8", "content_id": "c2f0da9747e81fd7b8fa5062f0f16f63ec9a8d63", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3465, "license_type": "permissive", "max_line_length": 80, "num_lines": 147, "path": "/bids-validator/src/issues/datasetIssues.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { nonSchemaIssues } from './list.ts'\nimport {\n Issue,\n IssueFile,\n IssueOutput,\n IssueFileOutput,\n Severity,\n FullTestIssuesReturn,\n} from '../types/issues.ts'\n\n// Code is deprecated, return something unusual but JSON serializable\nconst CODE_DEPRECATED = Number.MIN_SAFE_INTEGER\n\n/**\n * Format an internal file reference with context as IssueFileOutput\n */\nconst issueFile = (issue: Issue, f: IssueFile): IssueFileOutput => {\n const evidence = f.evidence || ''\n const reason = issue.reason || ''\n const line = f.line || 0\n const character = f.character || 0\n return {\n key: issue.key,\n code: CODE_DEPRECATED,\n file: { path: f.path, name: f.name, relativePath: f.path },\n evidence,\n line,\n character,\n severity: issue.severity,\n reason,\n helpUrl: issue.helpUrl,\n }\n}\n\ninterface DatasetIssuesAddParams {\n key: string\n reason: string\n // Defaults to error\n severity?: Severity\n // Defaults to an empty array if no files are provided\n files?: Array<IssueFile>\n}\n\n/**\n * Management class for dataset issues\n */\nexport class DatasetIssues extends Map<string, Issue> {\n constructor() {\n super()\n }\n\n add({\n key,\n reason,\n severity = 'error',\n files = [],\n }: DatasetIssuesAddParams): Issue {\n const existingIssue = this.get(key)\n // Handle both the shorthand BIDSFile array or full IssueFile\n if (existingIssue) {\n for (const f of files) {\n existingIssue.files.set(f.path, f)\n }\n return existingIssue\n } else {\n const newIssue = new Issue({\n key,\n severity,\n reason,\n files,\n })\n this.set(key, newIssue)\n return newIssue\n }\n }\n\n // Shorthand to test if an issue has occurred\n hasIssue({ key }: { key: string }): boolean {\n if (this.has(key)) {\n return true\n }\n return false\n }\n\n addNonSchemaIssue(key: string, files: Array<IssueFile>) {\n if (key in nonSchemaIssues) {\n this.add({\n key,\n reason: nonSchemaIssues[key].reason,\n severity: nonSchemaIssues[key].severity,\n files,\n })\n } else {\n throw new Error(\n `key: ${key} does not exist in non-schema issues definitions`,\n )\n }\n }\n\n fileInIssues(path: string): Issue[] {\n const matchingIssues = []\n for (const [key, issue] of this) {\n if (issue.files.get(path)) {\n matchingIssues.push(issue)\n }\n }\n return matchingIssues\n }\n\n /**\n * Report Issue keys related to a file\n * @param 
path File path relative to dataset root\n * @returns Array of matching issue keys\n */\n getFileIssueKeys(path: string): string[] {\n return this.fileInIssues(path).map((issue) => issue.key)\n }\n\n /**\n * Format output\n *\n * Converts from new internal representation to old IssueOutput structure\n */\n formatOutput(): FullTestIssuesReturn {\n const output: FullTestIssuesReturn = {\n errors: [],\n warnings: [],\n }\n for (const [key, issue] of this) {\n const outputIssue: IssueOutput = {\n severity: issue.severity,\n key: issue.key,\n code: CODE_DEPRECATED,\n additionalFileCount: 0,\n reason: issue.reason,\n files: Array.from(issue.files.values()).map((f) => issueFile(issue, f)),\n helpUrl: issue.helpUrl,\n }\n if (issue.severity === 'warning') {\n output.warnings.push(outputIssue)\n } else {\n output.errors.push(outputIssue)\n }\n }\n return output\n }\n}\n" }, { "alpha_fraction": 0.6008676886558533, "alphanum_fraction": 0.6008676886558533, "avg_line_length": 23.91891860961914, "blob_id": "79e97efce48555279a5f95e7c4d5b98e8ce45e6b", "content_id": "4252162749692f7dc3e747992206e9f30fb4e195", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 922, "license_type": "permissive", "max_line_length": 70, "num_lines": 37, "path": "/bids-validator-web/components/Options.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import React from 'react'\n\nconst Options = ({ setOption, options }) => (\n <>\n <form className=\"options\" onClick={setOption}>\n <label>\n <strong>Options:</strong>\n </label>\n <input\n name=\"ignoreWarnings\"\n type=\"checkbox\"\n checked={options.ignoreWarnings}\n readOnly\n />\n <label htmlFor=\"ignoreWarnings\">Ignore Warnings</label>\n <input\n name=\"ignoreNiftiHeaders\"\n type=\"checkbox\"\n checked={options.ignoreNiftiHeaders}\n readOnly\n />\n <label htmlFor=\"ignoreNiftiHeaders\">Ignore NIfTI Headers</label>\n <input\n name=\"ignoreSubjectConsistency\"\n type=\"checkbox\"\n checked={options.ignoreSubjectConsistency}\n readOnly\n />\n <label htmlFor=\"ignoreSubjectConsistency\">\n Skip Subject Filename Consistency Check\n </label>\n </form>\n <hr />\n </>\n)\n\nexport default Options\n" }, { "alpha_fraction": 0.6598984599113464, "alphanum_fraction": 0.6709465384483337, "avg_line_length": 31.514562606811523, "blob_id": "86d8dce665ac374301c45febe4520ad49aa5b47b", "content_id": "c765311a0118c893a76cbee79c7b43540f926616", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3349, "license_type": "permissive", "max_line_length": 78, "num_lines": 103, "path": "/bids-validator/src/validators/filenameIdentify.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals } from '../deps/asserts.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport {\n _findRuleMatches,\n datatypeFromDirectory,\n hasMatch,\n} from './filenameIdentify.ts'\nimport { BIDSFileDeno } from '../files/deno.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { FileIgnoreRules } from '../files/ignore.ts'\nimport { loadSchema } from '../setup/loadSchema.ts'\n\nconst PATH = 'tests/data/valid_dataset'\nconst schema = await loadSchema()\nconst fileTree = new FileTree(PATH, '/')\nconst issues = new DatasetIssues()\nconst ignore = new FileIgnoreRules([])\n\nconst node = {\n stem: 'participants',\n}\n\nconst recurseNode = 
{\n recurse: {\n suffixes: 'bold',\n },\n}\n\nconst schemaPath = 'test.schema.path'\n\nDeno.test('test _findRuleMatches', async (t) => {\n // base case\n await t.step('Rule stem matches', async () => {\n const fileName = 'participants.json'\n const file = new BIDSFileDeno(PATH, fileName, ignore)\n const context = new BIDSContext(fileTree, file, issues)\n _findRuleMatches(node, schemaPath, context)\n assertEquals(context.filenameRules[0], schemaPath)\n })\n\n //recurse case\n await t.step(\n 'Non-terminal schema node, should recurse then match',\n async () => {\n const fileName = 'task-rest_bold.json'\n const file = new BIDSFileDeno(PATH, fileName, ignore)\n const context = new BIDSContext(fileTree, file, issues)\n _findRuleMatches(recurseNode, schemaPath, context)\n assertEquals(context.filenameRules[0], `${schemaPath}.recurse`)\n },\n )\n})\n\nDeno.test('test datatypeFromDirectory', (t) => {\n const filesToTest = [\n ['/sub-01/ses-01/func/sub-01_ses-01_task-nback_run-01_bold.nii', 'func'],\n ['/sub-01/ses-01/anat/sub-01_ses-01_T1w.nii', 'anat'],\n ]\n filesToTest.map((test) => {\n const file = new BIDSFileDeno(PATH, test[0], ignore)\n const context = new BIDSContext(fileTree, file, issues)\n datatypeFromDirectory(schema, context)\n assertEquals(context.datatype, test[1])\n })\n})\n\nDeno.test('test hasMatch', async (t) => {\n await t.step('hasMatch', async () => {\n const fileName =\n '/sub-01/ses-01/func/sub-01_ses-01_task-nback_run-01_bold.nii'\n const file = new BIDSFileDeno(PATH, fileName, ignore)\n const context = new BIDSContext(fileTree, file, issues)\n hasMatch(schema, context)\n })\n\n await t.step('No match', async () => {\n const fileName = Deno.makeTempFileSync().split('/')[2]\n const file = new BIDSFileDeno('/tmp', fileName, ignore)\n\n const context = new BIDSContext(fileTree, file, issues)\n await hasMatch(schema, context)\n assertEquals(\n context.issues\n .getFileIssueKeys(context.file.path)\n .includes('NOT_INCLUDED'),\n true,\n )\n })\n await t.step('1+ matched, datatype match', async () => {\n const path = `${PATH}/../bids-examples/fnirs_automaticity`\n const fileName = 'events.json'\n const file = new BIDSFileDeno(path, fileName, ignore)\n const context = new BIDSContext(fileTree, file, issues)\n context.filenameRules = [\n 'rules.files.raw.task.events__mri',\n 'rules.files.raw.task.events__pet',\n ]\n await hasMatch(schema, context)\n assertEquals(context.filenameRules.length, 1)\n assertEquals(context.filenameRules[0], 'rules.files.raw.task.events__mri')\n })\n})\n" }, { "alpha_fraction": 0.6788976192474365, "alphanum_fraction": 0.6823621988296509, "avg_line_length": 32.77659606933594, "blob_id": "3c8c85fa31f7d928d18bef6e8a00aa0de34ededf", "content_id": "f286f807fbb063e3dc8cb5c84e9dd88d132a9e7e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 6350, "license_type": "permissive", "max_line_length": 78, "num_lines": 188, "path": "/bids-validator/src/validators/filenameIdentify.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/*\n * filenameIdentify.ts attempts to determine which schema rules from\n * `schema.rules.files` might apply to a given file context by looking at the\n * files suffix then its location in the directory hierarchy, and finally at\n * its extensions and entities. Ideally we end up with a single rule to\n * validate against. 
We try to take as broad an approach to finding a single\n * file rule as possible to generate the most possible errors for incorrectly\n * named files. Historically a regex was applied that was pass/fail with\n * little in the way of feed back. This way we can say hey you got the suffix\n * correct, but the directory is slightly off, or some entities are missing,\n * or too many are there for this rule. All while being able to point at an\n * object in the schema for reference.\n */\n// @ts-nocheck\nimport { SEP } from '../deps/path.ts'\nimport { GenericSchema, Schema } from '../types/schema.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport { lookupModality } from '../schema/modalities.ts'\nimport { CheckFunction } from '../types/check.ts'\nimport { lookupEntityLiteral } from './filenameValidate.ts'\n\nconst CHECKS: CheckFunction[] = [\n datatypeFromDirectory,\n findRuleMatches,\n hasMatch,\n cleanContext,\n]\n\nexport async function filenameIdentify(schema, context) {\n for (const check of CHECKS) {\n await check(schema as unknown as GenericSchema, context)\n }\n}\n\nfunction findRuleMatches(schema, context) {\n const schemaPath = 'rules.files'\n Object.keys(schema[schemaPath]).map((key) => {\n const path = `${schemaPath}.${key}`\n _findRuleMatches(schema[path], path, context)\n })\n return Promise.resolve()\n}\n\n/* Schema rules specifying valid filenames follow a variety of patterns.\n * 'path', 'stem' or 'suffixies' contain the most unique identifying\n * information for a rule. We don't know what kind of filename the context is,\n * so if one of these three match the respective value in the context lets\n * assume that this schema rule is applicable to this file.\n */\nexport function _findRuleMatches(node, path, context) {\n if (\n ('path' in node && context.file.name.endsWith(node.path)) ||\n ('stem' in node && context.file.name.startsWith(node.stem)) ||\n ('suffixes' in node && node.suffixes.includes(context.suffix))\n ) {\n context.filenameRules.push(path)\n return\n }\n if (\n !('path' in node || 'stem' in node || 'suffixes' in node) &&\n typeof node === 'object'\n ) {\n Object.keys(node).map((key) => {\n _findRuleMatches(node[key], `${path}.${key}`, context)\n })\n }\n}\n\nexport async function datatypeFromDirectory(schema, context) {\n const subEntity = schema.objects.entities.subject.name\n const subFormat = schema.objects.formats[subEntity.format]\n const sesEntity = schema.objects.entities.session.name\n const sesFormat = schema.objects.formats[sesEntity.format]\n const parts = context.file.path.split(SEP)\n let datatypeIndex = 2\n if (parts[0] !== '') {\n // we assume paths have leading '/'\n }\n // Ignoring associated data for now\n const subParts = parts[1].split('-')\n if (!(subParts.length === 2 && subParts[0] === subEntity)) {\n // first directory must be subject\n }\n if (parts.length < 3) {\n return Promise.resolve()\n }\n const sesParts = parts[2].split('-')\n if (sesParts.length === 2 && sesParts[0] === sesEntity) {\n datatypeIndex = 3\n }\n const dirDatatype = parts[datatypeIndex]\n for (let key in schema.rules.modalities) {\n if (schema.rules.modalities[key].datatypes.includes(dirDatatype)) {\n context.modality = key\n context.datatype = dirDatatype\n return Promise.resolve()\n }\n }\n}\n\nexport function hasMatch(schema, context) {\n if (\n context.filenameRules.length === 0 &&\n context.file.path !== '/.bidsignore'\n ) {\n context.issues.addNonSchemaIssue('NOT_INCLUDED', [context.file])\n }\n\n /* we have matched multiple rules and a datatype, 
lets see if we have one\n * rule with the same datatype, if so just use that one.\n */\n if (context.filenameRules.length > 1) {\n const datatypeMatch = context.filenameRules.filter((rulePath) => {\n if (Array.isArray(schema[rulePath].datatypes)) {\n return schema[rulePath].datatypes.includes(context.datatype)\n } else {\n return false\n }\n })\n if (datatypeMatch.length > 0) {\n context.filenameRules = datatypeMatch\n }\n }\n\n /* Filtering applicable rules based on datatypes failed, lets see if the\n * entities and extensions are enough to find a single rule to use.\n */\n if (context.filenameRules.length > 1) {\n const entExtMatch = context.filenameRules.filter((rulePath) => {\n return entitiesExtensionsInRule(schema, context, rulePath)\n })\n if (entExtMatch.length > 0) {\n context.filenameRules = [entExtMatch[0]]\n }\n }\n /* If we end up with multiple rules we should generate an error? */\n if (context.filenameRules.length > 1) {\n }\n\n return Promise.resolve()\n}\n\n/* Test if all of a given context's extension and entities are present in a\n * given rule. Only used to see if one rule is more applicable than another\n * after suffix and datatype matches couldn't find only one rule.\n */\nfunction entitiesExtensionsInRule(\n schema: GenericSchema,\n context: BIDSContext,\n path: string,\n): boolean {\n const rule = schema[path]\n const fileEntities = Object.keys(context.entities)\n const ruleEntities = Object.keys(rule.entities).map((key) =>\n lookupEntityLiteral(key, schema),\n )\n const extInRule =\n !rule.extensions ||\n (rule.extensions && rule.extensions.includes(context.extension))\n const entInRule =\n !rule.entities ||\n (rule.entities &&\n fileEntities.every((ent) => {\n return ruleEntities.includes(ent)\n }))\n return extInRule && entInRule\n}\n\n/* If none of the rules applicable to a filename use entities or what not,\n * lets remove them from the context so we don't trigger any unintended rules\n */\nfunction cleanContext(schema, context) {\n const rules = context.filenameRules.map((path) => schema[path])\n const filenameParts = [\n ['entities', 'entities', {}],\n ['extensions', 'extension', ''],\n ['suffixes', 'suffix', ''],\n ]\n filenameParts.map((part) => {\n if (\n rules.every(\n (rule) => !rule[part[0]] || Object.keys(rule[part[0]]).length === 0,\n )\n ) {\n context[part[1]] = part[2]\n }\n })\n}\n" }, { "alpha_fraction": 0.7261538505554199, "alphanum_fraction": 0.7384615540504456, "avg_line_length": 19.3125, "blob_id": "6973429bdb9801a60876d64ac31b0eb2d30a821d", "content_id": "9a51c99214062645cd3faa7055577bfcb80c9f39", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 325, "license_type": "permissive", "max_line_length": 48, "num_lines": 16, "path": "/Dockerfile", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "FROM node:18-alpine as build\nRUN npm install -g npm\n\nCOPY . 
/src\nWORKDIR /src\n\nRUN npm install \nRUN npm -w bids-validator run build\nRUN npm -w bids-validator pack\n\nFROM node:18-alpine\n\nCOPY --from=build /src/bids-validator-*.tgz /tmp\nRUN npm install -g /tmp/bids-validator-*.tgz\n\nENTRYPOINT [\"/usr/local/bin/bids-validator\"]\n" }, { "alpha_fraction": 0.6884058117866516, "alphanum_fraction": 0.7004830837249756, "avg_line_length": 36.6363639831543, "blob_id": "b9dac8144f376760ef2e903d6aab77fc76185cc1", "content_id": "658ced48cba2db08cd57ff68479d257c29788508", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 828, "license_type": "permissive", "max_line_length": 111, "num_lines": 22, "path": "/bids-validator/src/files/nifti.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import 'https://raw.githubusercontent.com/rii-mango/NIFTI-Reader-JS/v0.6.4/release/current/nifti-reader-min.js'\nimport { BIDSFile } from '../types/file.ts'\nimport { logger } from '../utils/logger.ts'\n\nexport async function loadHeader(file: BIDSFile) {\n try {\n const buf = await file.readBytes(1024)\n // @ts-expect-error NIFTI-Reader-JS required mangling globals\n const header = globalThis.nifti.readHeader(buf.buffer)\n // normalize between nifti-reader and spec schema\n // https://github.com/bids-standard/bids-specification/blob/master/src/schema/meta/context.yaml#L200\n if (header) {\n header.pixdim = header.pixDims\n header.dim = header.dims\n }\n return header\n } catch (err) {\n logger.warning(`NIfTI file could not be opened or read ${file.path}`)\n logger.debug(err)\n return\n }\n}\n" }, { "alpha_fraction": 0.5420368909835815, "alphanum_fraction": 0.5456823706626892, "avg_line_length": 28.260000228881836, "blob_id": "8f297b2c8ee5a1d06a087713511ee4904c55445c", "content_id": "c2fcfc7206b8a0431d383e7a5dcc35ea6873227e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4389, "license_type": "permissive", "max_line_length": 97, "num_lines": 150, "path": "/bids-validator/validators/tsv/__tests__/validateTsvColumns.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport validateTsvColumns, {\n validatePetBloodHeaders,\n} from '../validateTsvColumns'\n\ndescribe('validateTsvColumns', () => {\n describe('for participants.tsv', () => {\n const file = {\n name: 'participants.tsv',\n relativePath: '/participants.tsv',\n }\n const jsonContentsDict = {\n '/participants.json': { NewColumn: 'description' },\n }\n\n it('allows for tabular files with columns that are described in the bids spec', () => {\n const tsvs = [\n {\n contents: 'participant_id\\n',\n file: file,\n },\n ]\n const issues = validateTsvColumns(tsvs, {}, [])\n assert.lengthOf(issues, 0)\n })\n it('checks for tabular files with custom columns not described in a data dictionary', () => {\n const tsvs = [\n {\n contents: 'header1\\n',\n file: file,\n },\n ]\n const issues = validateTsvColumns(tsvs, {}, [])\n assert.lengthOf(issues, 1)\n assert.equal(issues[0].code, 82)\n })\n it('allows custom columns if they are described in a data dictionary', () => {\n const tsvs = [\n {\n contents: 'NewColumn\\n',\n file: file,\n },\n ]\n const issues = validateTsvColumns(tsvs, jsonContentsDict, [])\n assert.lengthOf(issues, 0)\n })\n it('should trim the new line carriages created by windows tabular files,', () => {\n const tsvs = [\n {\n contents: 'participant_id\\t\\r\\n',\n file: file,\n 
},\n {\n contents: 'participant_id\\r\\n',\n file: file,\n },\n ]\n const issues = validateTsvColumns(tsvs, {}, [])\n assert.lengthOf(issues, 0)\n })\n })\n\n describe('requires_tsv_non_custom_columns for validatePetBloodHeaders', () => {\n let tsv, mergedDict, schema\n beforeEach(() => {\n tsv = {\n contents: 'col_A\\tcol_B\\n',\n file: { name: 'test_blood.tsv' },\n }\n // associated json sidecar to tsv\n mergedDict = {\n PropA: true,\n PropB: true,\n PropC: '',\n }\n // minimal subset of bids-validator/validators/json/schemas/pet_blood.json\n schema = {\n properties: {\n PropA: {\n type: 'boolean',\n requires_tsv_non_custom_columns: ['col_A'],\n },\n PropB: {\n type: 'boolean',\n requires_tsv_non_custom_columns: ['col_A', 'col_B'],\n },\n PropC: { type: 'string' },\n },\n }\n })\n it('passes when required columns are present', () => {\n const issues = validatePetBloodHeaders(tsv, mergedDict, schema)\n assert.isEmpty(issues)\n })\n it('does not require columns when associated JSON properties are false', () => {\n tsv.contents = '\\n'\n mergedDict.PropA = false\n mergedDict.PropB = false\n const issues = validatePetBloodHeaders(tsv, mergedDict, schema)\n assert.isEmpty(issues)\n })\n it('requires column when JSON property is true', () => {\n tsv.contents = 'col_A'\n const issues = validatePetBloodHeaders(tsv, mergedDict, schema)\n assert.lengthOf(issues, 1)\n assert.equal(issues[0].key, 'TSV_MISSING_REQUIRED_COLUMN')\n assert.equal(issues[0].file.name, tsv.file.name)\n assert.equal(issues[0].severity, 'error')\n assert.include(issues[0].evidence, 'missing header \"col_B\"')\n })\n it('produces errors for each missing column', () => {\n tsv.contents = '\\n'\n const issues = validatePetBloodHeaders(tsv, mergedDict, schema)\n assert.lengthOf(issues, 2)\n })\n })\n it('should strip byte order marks from the start of TSV files', () => {\n const tsvs = [\n {\n contents: '\\uFEFFparticipant_id\\t\\r\\n',\n file: {\n name: 'participants.tsv',\n relativePath: './participants.tsv',\n },\n },\n ]\n const issues = validateTsvColumns(tsvs, {}, [])\n assert.lengthOf(issues, 0)\n })\n it('should generate error with empty columns', () => {\n const tsvs = [\n {\n contents: '\\t',\n file: {\n name: 'test.tsv',\n relativePath: './test.tsv',\n },\n },\n {\n contents: '\\t\\t',\n file: {\n name: 'test.tsv',\n relativePath: './test.tsv',\n },\n },\n ]\n const issues = validateTsvColumns(tsvs, {}, [])\n assert.lengthOf(issues, 2)\n })\n})\n" }, { "alpha_fraction": 0.5735567808151245, "alphanum_fraction": 0.576815664768219, "avg_line_length": 29.685714721679688, "blob_id": "ba623250c2c8da68a62a7da3d68232f00ac53846", "content_id": "b5e4db9347728a794bbff9c9a6b72915e0325d1f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2148, "license_type": "permissive", "max_line_length": 86, "num_lines": 70, "path": "/bids-validator/src/summary/collectSubjectMetadata.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { SubjectMetadata } from '../types/validation-result.ts'\nconst PARTICIPANT_ID = 'participantId'\n/**\n * Go from tsv format string with participant_id as a required header to array of form\n * [\n * {\n * participantId: 'participant_id_1'\n * foo: 'x',\n * ...\n * },\n * {\n * participantId: 'participant_id_2'\n * foo: 'y',\n * ...\n * }\n * ...\n * ]\n *\n * returns null if participant_id is not a header or file contents do not exist\n * @param {string} participantsTsvContent\n */\nexport const 
collectSubjectMetadata = (\n participantsTsvContent: string,\n): SubjectMetadata[] => {\n if (!participantsTsvContent) {\n return []\n }\n\n const contentTable = participantsTsvContent\n .split(/\\r?\\n/)\n .filter((row) => row !== '')\n .map((row) => row.split('\\t'))\n const [snakeCaseHeaders, ...subjectData] = contentTable\n const headers = snakeCaseHeaders.map((header) =>\n header === 'participant_id' ? PARTICIPANT_ID : header,\n )\n const targetKeys = [PARTICIPANT_ID, 'age', 'sex', 'group']\n .map((key) => ({\n key,\n index: headers.findIndex((targetKey) => targetKey === key),\n }))\n .filter(({ index }) => index !== -1)\n const participantIdKey = targetKeys.find(({ key }) => key === PARTICIPANT_ID)\n const ageKey = targetKeys.find(({ key }) => key === 'age')\n if (participantIdKey === undefined) return [] as SubjectMetadata[]\n else\n return subjectData\n .map((data) => {\n // this first map is for transforming any data coming out of participants.tsv:\n // strip subject ids to match metadata.subjects: 'sub-01' -> '01'\n data[participantIdKey.index] = data[participantIdKey.index].replace(\n /^sub-/,\n '',\n )\n // make age an integer\n // @ts-expect-error\n if (ageKey) data[ageKey.index] = parseInt(data[ageKey.index])\n return data\n })\n .map((data) =>\n //extract all target metadata for each subject\n targetKeys.reduce(\n (subject, { key, index }) => ({\n ...subject,\n [key]: data[index],\n }),\n {},\n ),\n ) as SubjectMetadata[]\n}\n" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.671875, "avg_line_length": 63, "blob_id": "9a530284427047e7ed77b99600bf82b60f38117e", "content_id": "2516b6191aaf2477a762bcbdb9c7d34d293caa7c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 64, "license_type": "permissive", "max_line_length": 63, "num_lines": 1, "path": "/bids-validator/src/deps/fs.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export { walk } from 'https://deno.land/[email protected]/fs/walk.ts'\n" }, { "alpha_fraction": 0.6285338401794434, "alphanum_fraction": 0.6495726704597473, "avg_line_length": 22.399999618530273, "blob_id": "2f504bcea2ed881d667fe86f148ff35d33e1ed96", "content_id": "29faba1b934609a56a9e58b50330d66276dec18d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1521, "license_type": "permissive", "max_line_length": 74, "num_lines": 65, "path": "/bids-validator/src/tests/simple-dataset.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { FileTree } from '../types/filetree.ts'\nimport { nullReadBytes } from './nullReadBytes.ts'\n\nconst text = () => Promise.resolve('')\n\n// Very basic dataset modeled for tests\nconst rootFileTree = new FileTree('/', '')\nconst subjectFileTree = new FileTree('/sub-01', 'sub-01', rootFileTree)\nconst anatFileTree = new FileTree('/sub-01/anat', 'anat', subjectFileTree)\nanatFileTree.files = [\n {\n text,\n path: '/sub-01/anat/sub-01_T1w.nii.gz',\n name: 'sub-01_T1w.nii.gz',\n size: 311112,\n ignored: false,\n stream: new ReadableStream<Uint8Array>(),\n readBytes: nullReadBytes,\n },\n]\nsubjectFileTree.files = []\nsubjectFileTree.directories = [anatFileTree]\nrootFileTree.files = [\n {\n text,\n path: '/dataset_description.json',\n name: 'dataset_description.json',\n size: 240,\n ignored: false,\n stream: new ReadableStream(),\n readBytes: nullReadBytes,\n },\n {\n text,\n path: '/README',\n name: 'README',\n size: 709,\n 
ignored: false,\n stream: new ReadableStream(),\n readBytes: nullReadBytes,\n },\n {\n text,\n path: '/CHANGES',\n name: 'CHANGES',\n size: 39,\n ignored: false,\n stream: new ReadableStream(),\n readBytes: nullReadBytes,\n },\n {\n text,\n path: '/participants.tsv',\n name: 'participants.tsv',\n size: 36,\n ignored: false,\n stream: new ReadableStream(),\n readBytes: nullReadBytes,\n },\n]\nrootFileTree.directories = [subjectFileTree]\n\nexport const simpleDataset = rootFileTree\n\nexport const simpleDatasetFileCount = 5\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 69, "blob_id": "9f082bac4e32da0f0abc83eb1d5e1e57e49a024c", "content_id": "5fc3225d0a14608da443fd6c5aeb2462ee557b0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 70, "license_type": "permissive", "max_line_length": 69, "num_lines": 1, "path": "/bids-validator/src/deps/fmt.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export * as colors from 'https://deno.land/[email protected]/fmt/colors.ts'\n" }, { "alpha_fraction": 0.600570023059845, "alphanum_fraction": 0.6050488352775574, "avg_line_length": 24.06122398376465, "blob_id": "c430fff978c68afa695d20aa22d215733207f14b", "content_id": "3d0c639be4b5704ef6cd817842c544a92d905cea", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2456, "license_type": "permissive", "max_line_length": 68, "num_lines": 98, "path": "/bids-validator/validators/bvec/bvec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\nimport { type } from '../../utils'\n\n/**\n * bvec\n *\n * Takes a bvec file, its contents as a string\n * and a callback as arguments. Callsback\n * with any issues it finds while validating\n * against the BIDS specification.\n */\nexport default function bvec(file, contents, callback) {\n let issues = []\n\n issues = issues.concat(checkType(contents, file))\n if (issues.length) {\n return callback(issues)\n }\n\n // check that there are exactly three rows\n issues = issues.concat(checkNumberOfRows(contents, file))\n\n // check that each row is the same length\n issues = issues.concat(checkRowConsistency(contents, file))\n\n // check that each value is a number\n issues = issues.concat(checkValueValidity(contents, file))\n\n callback(issues)\n}\n\nfunction checkType(contents, file) {\n const issues = []\n // throw error if contents are undefined or the wrong type\n if (!type.checkType(contents, 'string')) {\n const evidence = contents\n ? 
'The contents of this .bvec file have type ' +\n typeof contents +\n ' but should be a string.'\n : 'The contents of this .bvec file are undefined.'\n issues.push(\n new Issue({\n code: 88,\n file: file,\n evidence: evidence,\n }),\n )\n }\n return issues\n}\n\nfunction checkNumberOfRows(contents, file) {\n const issues = []\n if (contents.replace(/^\\s+|\\s+$/g, '').split('\\n').length !== 3) {\n issues.push(\n new Issue({\n code: 31,\n file: file,\n }),\n )\n }\n return issues\n}\n\nfunction checkRowConsistency(contents, file) {\n let rowLength = false\n\n const rows = contents.replace(/^\\s+|\\s+$/g, '').split('\\n')\n\n for (let i = 0; i < rows.length; i++) {\n const row = rows[i].replace(/^\\s+|\\s+$/g, '').split(' ')\n if (!rowLength) {\n rowLength = row.length\n }\n\n // check for consistent row length\n if (rowLength !== row.length) {\n return [new Issue({ code: 46, file: file })]\n }\n }\n return []\n}\n\nfunction checkValueValidity(contents, file) {\n const rows = contents.replace(/^\\s+|\\s+$/g, '').split('\\n')\n for (let i = 0; i < rows.length; i++) {\n const row = rows[i].replace(/^\\s+|\\s+$/g, '').split(' ')\n\n // check for proper separator and value type\n const hasIssue = row\n .map((value) => !type.checkType(value, 'number'))\n .some((val) => val)\n if (hasIssue) {\n return [new Issue({ code: 47, file: file })]\n }\n }\n return []\n}\n" }, { "alpha_fraction": 0.6850393414497375, "alphanum_fraction": 0.6850393414497375, "avg_line_length": 13.11111068725586, "blob_id": "b383a45d34e2714c812a8467bdcb710aab0ae44f", "content_id": "e7676d7652f5843b3508736d1f7546e93d9a4ca2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 127, "license_type": "permissive", "max_line_length": 33, "num_lines": 9, "path": "/bids-validator/validators/bval/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import bval from './bval'\nimport validate from './validate'\n\nexport { bval, validate }\n\nexport default {\n bval,\n validate,\n}\n" }, { "alpha_fraction": 0.621052622795105, "alphanum_fraction": 0.6225563883781433, "avg_line_length": 23.629629135131836, "blob_id": "f1f544686544da3d6e45e6870022427f6374849a", "content_id": "f640e0f054afb30507adc5a61be8aab35732038c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 665, "license_type": "permissive", "max_line_length": 73, "num_lines": 27, "path": "/bids-validator/utils/summary/collectSubjects.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import type from '../type'\n\nconst collectSubjects = (fileList) => {\n const subjects = []\n const fileKeys = Object.keys(fileList)\n fileKeys.forEach((key) => {\n const file = fileList[key]\n if (\n !type.file.isStimuliData(file.relativePath) &&\n type.isBIDS(file.relativePath)\n ) {\n const pathValues = type.getPathValues(file.relativePath)\n const isEmptyRoom = pathValues.sub && pathValues.sub == 'emptyroom'\n\n if (\n pathValues.sub &&\n subjects.indexOf(pathValues.sub) === -1 &&\n !isEmptyRoom\n ) {\n subjects.push(pathValues.sub)\n }\n }\n })\n return subjects\n}\n\nexport default collectSubjects\n" }, { "alpha_fraction": 0.675000011920929, "alphanum_fraction": 0.675000011920929, "avg_line_length": 58.5, "blob_id": "d02a31549d3ef7298c8b48417f0778c324cf3d03", "content_id": "79c1d91700204becf8215035c900d83a9b68057f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, 
"language": "TypeScript", "length_bytes": 120, "license_type": "permissive", "max_line_length": 85, "num_lines": 2, "path": "/bids-validator/bids-validator-deno", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "#!/bin/env -S deno run --allow-read --allow-write --allow-env --allow-net --allow-run\nimport './src/bids-validator.ts'\n\n" }, { "alpha_fraction": 0.8032786846160889, "alphanum_fraction": 0.8032786846160889, "avg_line_length": 19.33333396911621, "blob_id": "7ddbac88b6f725ab9d68ad3e799ea6a189b48ed3", "content_id": "7ce367cbc071d8564e9068e78074f581d05cc4f4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 61, "license_type": "permissive", "max_line_length": 33, "num_lines": 3, "path": "/bids-validator/validators/events/validate.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import eventsTest from './events'\n\nexport default eventsTest\n" }, { "alpha_fraction": 0.6429938077926636, "alphanum_fraction": 0.6453849673271179, "avg_line_length": 30.923664093017578, "blob_id": "df3303a62e97ecc73a722f7f4d3c7f11c0b5e569", "content_id": "1b80b020765bc011e7941900fb3fde5a0047347d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4182, "license_type": "permissive", "max_line_length": 116, "num_lines": 131, "path": "/bids-validator/src/schema/associations.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { ContextAssociations } from '../types/context.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { BIDSContext } from './context.ts'\nimport { readEntities } from './entities.ts'\nimport { parseTSV } from '../files/tsv.ts'\n\n// type AssociationsLookup = Record<keyof ContextAssociations, { extensions: string[], inherit: boolean, load: ... 
}\n\n/**\n * This object describes associated files for data files in a bids dataset\n * For any given datafile we iterate over every key/value in this object.\n * For each entry we see if any files in the datafiles directory have:\n * - a suffix that matches the key\n * - an extension in the entry's extension array.\n * - that all the files entities and their values match those of the datafile\n * If the entry allows for inheritance we recurse up the filetree looking for other applicable files.\n * The load functions are incomplete, some associations need to read data from a file so they're\n * returning promises for now.\n */\nconst associationLookup = {\n events: {\n extensions: ['.tsv'],\n inherit: true,\n load: (file: BIDSFile): Promise<ContextAssociations['events']> => {\n return file.text().then((text) => parseTSV(text))\n },\n },\n aslcontext: {\n extensions: ['.tsv'],\n inherit: true,\n load: (file: BIDSFile): Promise<ContextAssociations['aslcontext']> => {\n return Promise.resolve({ path: file.path, n_rows: 0, volume_type: [] })\n },\n },\n m0scan: {\n extensions: ['.nii', '.nii.gz'],\n inherit: false,\n load: (file: BIDSFile): Promise<ContextAssociations['m0scan']> => {\n return Promise.resolve({ path: file.path })\n },\n },\n magnitude: {\n extensions: ['.nii', '.nii.gz'],\n inherit: false,\n load: (file: BIDSFile): Promise<ContextAssociations['magnitude']> => {\n return Promise.resolve({ path: file.path, onset: 'silly' })\n },\n },\n magnitude1: {\n extensions: ['.nii', '.nii.gz'],\n inherit: false,\n load: (file: BIDSFile): Promise<ContextAssociations['magnitude1']> => {\n return Promise.resolve({ path: file.path })\n },\n },\n bval: {\n extensions: ['.nii', '.nii.gz'],\n inherit: true,\n load: (file: BIDSFile): Promise<ContextAssociations['bval']> => {\n return Promise.resolve({ path: file.path, n_cols: 0 })\n },\n },\n bvec: {\n extensions: ['.nii', '.nii.gz'],\n inherit: true,\n load: (file: BIDSFile): Promise<ContextAssociations['bvec']> => {\n return Promise.resolve({ path: file.path, n_cols: 0 })\n },\n },\n channels: {\n extensions: ['.tsv'],\n inherit: true,\n load: (file: BIDSFile): Promise<ContextAssociations['events']> => {\n return file.text().then((text) => parseTSV(text))\n },\n },\n}\n\nexport async function buildAssociations(\n fileTree: FileTree,\n source: BIDSContext,\n): Promise<ContextAssociations> {\n const associations: ContextAssociations = {}\n for (const key in associationLookup as typeof associationLookup) {\n const { extensions, inherit } =\n associationLookup[key as keyof typeof associationLookup]\n const paths = getPaths(fileTree, source, key, extensions)\n if (paths.length === 0) {\n continue\n }\n if (paths.length > 1) {\n // error?\n }\n // @ts-expect-error\n associations[key] = await associationLookup[key].load(paths[0])\n }\n return Promise.resolve(associations)\n}\n\nfunction getPaths(\n fileTree: FileTree,\n source: BIDSContext,\n targetSuffix: string,\n targetExtensions: string[],\n) {\n const validAssociations = fileTree.files.filter((file) => {\n const { suffix, extension, entities } = readEntities(file.name)\n return (\n targetExtensions.includes(extension) &&\n suffix === targetSuffix &&\n Object.keys(entities).every((entity) => {\n return (\n entity in source.entities &&\n entities[entity] === source.entities[entity]\n )\n })\n )\n })\n\n const nextDir = fileTree.directories.find((directory) => {\n return source.file.path.startsWith(directory.path)\n })\n\n if (nextDir) {\n validAssociations.push(\n ...getPaths(nextDir, source, 
targetSuffix, targetExtensions),\n )\n }\n return validAssociations\n}\n" }, { "alpha_fraction": 0.5805284380912781, "alphanum_fraction": 0.5921100378036499, "avg_line_length": 29.032608032226562, "blob_id": "496d0125e1a2e461da84ccb91cdf09ea1f2d7501", "content_id": "107167699d9f6e96351f6a1057e5823cc3555f57", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2773, "license_type": "permissive", "max_line_length": 77, "num_lines": 92, "path": "/bids-validator/utils/__tests__/unit.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import unit from '../unit'\n\nconst { prefixes, roots } = unit\nconst validRoot = roots[0]\n\ndescribe('unit validator', () => {\n it('handles simple units', () => {\n roots.forEach((validRoot) => {\n const goodOutput = unit.validate(validRoot)\n expect(goodOutput.isValid).toBe(true)\n })\n const invalidRoots = [\n 'definitielynotavalidroot',\n `%/${validRoot}`,\n `n/a*${validRoot}`,\n ]\n invalidRoots.forEach((invalidRoot) => {\n const badOutput = unit.validate(invalidRoot)\n expect(badOutput.isValid).toBe(false)\n })\n })\n\n it('handles simple units with prefixes', () => {\n prefixes.forEach((validPrefix) => {\n const goodOutput = unit.validate(validPrefix + validRoot)\n expect(goodOutput.isValid).toBe(true)\n })\n const badOutput = unit.validate('badprefix' + validRoot)\n expect(badOutput.isValid).toBe(false)\n })\n\n const validExponents = [\n '^2',\n '^543',\n '¹²³',\n ...unit.superscriptNumbers.slice(0, 3),\n '^-2',\n '⁻³',\n ]\n it('handles simple units with exponents', () => {\n validExponents.forEach((exp) => {\n const goodOutput = unit.validate(validRoot + exp)\n expect(goodOutput.isValid).toBe(true)\n })\n const invalidExponents = ['^^12', '142', '1', '0', '^.1', '^2.1']\n invalidExponents.forEach((exp) => {\n const badOutput = unit.validate(validRoot + exp)\n expect(badOutput.isValid).toBe(false)\n })\n validExponents.slice(0, 3).forEach((exp) => {\n const badOutput = unit.validate(exp)\n expect(badOutput.isValid).toBe(false)\n })\n })\n\n it('handles derived units', () => {\n const validUnits = ['T/m', 'N*m', 'm^2/s^2', 'mm/ms', 'kT³*nm²', 'm²/s²']\n validUnits.forEach((derivedUnit) => {\n const goodOutput = unit.validate(derivedUnit)\n expect(goodOutput.isValid).toBe(true)\n })\n const invalidUnits = [\n `/${validRoot}`,\n `*${validRoot}`,\n `${validRoot}/`,\n `${validRoot}*`,\n `${validRoot}//${validRoot}`,\n `${validRoot}///${validRoot}`,\n `${validRoot}**${validRoot}`,\n `${validRoot}***${validRoot}`,\n `${roots.slice(0, 3).join('')}`,\n ...validExponents.map((exp) => `${exp}${validRoot}`),\n ]\n invalidUnits.forEach((derivedUnit) => {\n const badOutput = unit.validate(derivedUnit)\n expect(badOutput.isValid).toBe(false)\n })\n })\n\n describe('edge cases', () => {\n it('handles unavailable units', () => {\n const unavaliableUnit = 'n/a'\n const goodOutput = unit.validate(unavaliableUnit)\n expect(goodOutput.isValid).toBe(true)\n })\n it('handles percentages', () => {\n const unavaliableUnit = '%'\n const goodOutput = unit.validate(unavaliableUnit)\n expect(goodOutput.isValid).toBe(true)\n })\n })\n})\n" }, { "alpha_fraction": 0.6561403274536133, "alphanum_fraction": 0.6578947305679321, "avg_line_length": 30.66666603088379, "blob_id": "648f9f3530661ad47561eb92964167b5e19c7b23", "content_id": "863d8c697573c8f18c2632b8ad7c9b8b759a7a12", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", 
"length_bytes": 1140, "license_type": "permissive", "max_line_length": 74, "num_lines": 36, "path": "/bids-validator/src/schema/walk.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert, assertEquals } from '../deps/asserts.ts'\nimport { BIDSContext } from './context.ts'\nimport { walkFileTree } from './walk.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport {\n simpleDataset,\n simpleDatasetFileCount,\n} from '../tests/simple-dataset.ts'\n\nDeno.test('file tree walking', async (t) => {\n await t.step('visits each file and creates a BIDSContext', async () => {\n const issues = new DatasetIssues()\n for await (const context of walkFileTree(simpleDataset, issues)) {\n assert(\n context instanceof BIDSContext,\n 'walk file tree did not return a BIDSContext',\n )\n }\n })\n await t.step('visits every file expected', async () => {\n const issues = new DatasetIssues()\n let accumulator = 0\n for await (const context of walkFileTree(simpleDataset, issues)) {\n assert(\n context instanceof BIDSContext,\n 'walk file tree did not return a BIDSContext',\n )\n accumulator = accumulator + 1\n }\n assertEquals(\n accumulator,\n simpleDatasetFileCount,\n 'visited file count does not match expected value',\n )\n })\n})\n" }, { "alpha_fraction": 0.6745406985282898, "alphanum_fraction": 0.682414710521698, "avg_line_length": 28.30769157409668, "blob_id": "1578a0f9d673b653e37318518c29d37b4803ba6d", "content_id": "7e0cd1871fb54f4cfc600fe0eb9d857206cad74d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 762, "license_type": "permissive", "max_line_length": 78, "num_lines": 26, "path": "/bids-validator/src/tests/generate-filenames.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "function randomString() {\n return Math.random().toString(36).substring(6)\n}\n\nfunction randomEntityString(prefix?: string): string {\n if (prefix) {\n return `${prefix}-${randomString()}`\n } else {\n return `${randomString()}-${randomString()}`\n }\n}\n\n/**\n * Generate random filenames not following entity ordering rules if length > 0\n */\nexport function generateBIDSFilename(length = 1, extension = '.json') {\n const subject = randomEntityString('sub')\n const session = randomEntityString('ses')\n const run = randomEntityString('run')\n const acquisition = randomEntityString('acq')\n const parts = [subject, session, run, acquisition]\n for (let n = 0; n < length; n++) {\n parts.push(randomEntityString())\n }\n return parts.join('_') + extension\n}\n" }, { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6169544458389282, "avg_line_length": 23.5, "blob_id": "0a625ade60758c8483ebdba44ec8aa9ce6d5ec31", "content_id": "d85121c00fd98dc1cf38115c243e6840ca8ee357", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 637, "license_type": "permissive", "max_line_length": 73, "num_lines": 26, "path": "/bids-validator/utils/summary/collectSessions.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import type from '../type'\n\nconst collectSessions = (fileList) => {\n const sessions = []\n Object.keys(fileList).forEach((key) => {\n const file = fileList[key]\n if (\n !type.file.isStimuliData(file.relativePath) &&\n type.isBIDS(file.relativePath)\n ) {\n const pathValues = type.getPathValues(file.relativePath)\n const isEmptyRoom = pathValues.sub && 
pathValues.sub == 'emptyroom'\n\n if (\n pathValues.ses &&\n sessions.indexOf(pathValues.ses) === -1 &&\n !isEmptyRoom\n ) {\n sessions.push(pathValues.ses)\n }\n }\n })\n return sessions\n}\n\nexport default collectSessions\n" }, { "alpha_fraction": 0.6850393414497375, "alphanum_fraction": 0.6850393414497375, "avg_line_length": 13.11111068725586, "blob_id": "a37f7a6a1bbca525b8c83f58d7685ce76bdf25a8", "content_id": "682eb4b8ae6a2686ef3232a468968e5194ae61c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 127, "license_type": "permissive", "max_line_length": 33, "num_lines": 9, "path": "/bids-validator/validators/bvec/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import bvec from './bvec'\nimport validate from './validate'\n\nexport { bvec, validate }\n\nexport default {\n bvec,\n validate,\n}\n" }, { "alpha_fraction": 0.4960629940032959, "alphanum_fraction": 0.4960629940032959, "avg_line_length": 26.78125, "blob_id": "655ab45131d4f4a31e6b906fd8533c5d369cfe8b", "content_id": "62a4a0bb723a3fc637838a9513e3d2c93d34e260", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 889, "license_type": "permissive", "max_line_length": 58, "num_lines": 32, "path": "/bids-validator/validators/bvec/validate.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nimport bvec from './bvec'\n\nconst validate = (files, bContentsDict, annexed, dir) => {\n // validate bvec\n let issues = []\n const bvecPromises = files.map(function (file) {\n return utils.limit(\n () =>\n new Promise((resolve, reject) => {\n utils.files\n .readFile(file, annexed, dir)\n .then((contents) => {\n bContentsDict[file.relativePath] = contents\n bvec(file, contents, function (bvecIssues) {\n issues = issues.concat(bvecIssues)\n resolve()\n })\n })\n .catch((err) =>\n utils.issues.redirect(err, reject, () => {\n issues.push(err)\n resolve()\n }),\n )\n }),\n )\n })\n return Promise.all(bvecPromises).then(() => issues)\n}\n\nexport default validate\n" }, { "alpha_fraction": 0.6425594687461853, "alphanum_fraction": 0.6459916830062866, "avg_line_length": 25.660131454467773, "blob_id": "268855406c660dbcb6a78f578afb910b458595af", "content_id": "ca15c43586f0a01b87a195b16b21448307b4059b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4079, "license_type": "permissive", "max_line_length": 78, "num_lines": 153, "path": "/bids-validator/src/files/deno.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Deno specific implementation for reading files\n */\nimport { join, basename } from '../deps/path.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { requestReadPermission } from '../setup/requestPermissions.ts'\nimport { readBidsIgnore, FileIgnoreRules } from './ignore.ts'\n\n/**\n * Thrown when a text file is decoded as UTF-8 but contains UTF-16 characters\n */\nexport class UnicodeDecodeError extends Error {\n constructor(message: string) {\n super(message)\n this.name = 'UnicodeDecode'\n }\n}\n\n/**\n * Deno implementation of BIDSFile\n */\nexport class BIDSFileDeno implements BIDSFile {\n #ignore: FileIgnoreRules\n name: string\n path: string\n #fileInfo?: Deno.FileInfo\n #datasetAbsPath: string\n\n constructor(datasetPath: string, path: 
string, ignore: FileIgnoreRules) {\n this.#datasetAbsPath = datasetPath\n this.path = path\n this.name = basename(path)\n this.#ignore = ignore\n try {\n this.#fileInfo = Deno.statSync(this._getPath())\n } catch (error) {\n if (error.code === 'ENOENT') {\n this.#fileInfo = Deno.lstatSync(this._getPath())\n }\n }\n }\n\n private _getPath(): string {\n return join(this.#datasetAbsPath, this.path)\n }\n\n get size(): number {\n return this.#fileInfo ? this.#fileInfo.size : -1\n }\n\n get stream(): ReadableStream<Uint8Array> {\n const handle = this.#openHandle()\n return handle.readable\n }\n\n get ignored(): boolean {\n return this.#ignore.test(this.path)\n }\n\n /**\n * Read the entire file and decode as utf-8 text\n */\n async text(): Promise<string> {\n const streamReader = this.stream\n .pipeThrough(new TextDecoderStream('utf-8'))\n .getReader()\n let data = ''\n try {\n // Read once to check for unicode issues\n const { done, value } = await streamReader.read()\n // Check for UTF-16 BOM\n if (value && value.startsWith('\\uFFFD')) {\n throw new UnicodeDecodeError('This file appears to be UTF-16')\n }\n if (done) return data\n data += value\n // Continue reading the rest of the file if no unicode issues were found\n while (true) {\n const { done, value } = await streamReader.read()\n if (done) return data\n data += value\n }\n } finally {\n streamReader.releaseLock()\n }\n }\n\n /**\n * Read bytes in a range efficiently from a given file\n */\n async readBytes(size: number, offset = 0): Promise<Uint8Array> {\n const handle = this.#openHandle()\n const buf = new Uint8Array(size)\n await handle.seek(offset, Deno.SeekMode.Start)\n await handle.read(buf)\n Deno.close(handle.rid)\n return buf\n }\n\n /**\n * Return a Deno file handle\n */\n #openHandle(): Deno.FsFile {\n // Avoid asking for write access\n const openOptions = { read: true, write: false }\n return Deno.openSync(this._getPath(), openOptions)\n }\n}\n\nexport async function _readFileTree(\n rootPath: string,\n relativePath: string,\n ignore: FileIgnoreRules,\n parent?: FileTree,\n): Promise<FileTree> {\n await requestReadPermission()\n const name = basename(relativePath)\n const tree = new FileTree(relativePath, name, parent)\n\n for await (const dirEntry of Deno.readDir(join(rootPath, relativePath))) {\n if (dirEntry.isFile || dirEntry.isSymlink) {\n const file = new BIDSFileDeno(\n rootPath,\n join(relativePath, dirEntry.name),\n ignore,\n )\n // For .bidsignore, read in immediately and add the rules\n if (dirEntry.name === '.bidsignore') {\n ignore.add(await readBidsIgnore(file))\n }\n tree.files.push(file)\n }\n if (dirEntry.isDirectory) {\n const dirTree = await _readFileTree(\n rootPath,\n join(relativePath, dirEntry.name),\n ignore,\n tree,\n )\n tree.directories.push(dirTree)\n }\n }\n return tree\n}\n\n/**\n * Read in the target directory structure and return a FileTree\n */\nexport function readFileTree(rootPath: string): Promise<FileTree> {\n const ignore = new FileIgnoreRules([])\n return _readFileTree(rootPath, '/', ignore)\n}\n" }, { "alpha_fraction": 0.6734017133712769, "alphanum_fraction": 0.6734017133712769, "avg_line_length": 34.27659606933594, "blob_id": "bddaab28da36e0feab6587f82559c41d0f884e81", "content_id": "5e4c66165742b91d8a60bb57af14cd9a5663bab1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3316, "license_type": "permissive", "max_line_length": 128, "num_lines": 94, "path": "/bids-validator/src/issues/list.ts", "repo_name": 
"bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { IssueDefinitionRecord } from '../types/issues.ts'\n\nexport const filenameIssues: IssueDefinitionRecord = {\n INVALID_ENTITY_LABEL: {\n severity: 'error',\n reason:\n \"entity label doesn't match format found for files with this suffix\",\n },\n ENTITY_WITH_NO_LABEL: {\n severity: 'error',\n reason: 'Found an entity with no label.',\n },\n MISSING_REQUIRED_ENTITY: {\n severity: 'error',\n reason: 'Missing required entity for files with this suffix.',\n },\n ENTITY_NOT_IN_RULE: {\n severity: 'error',\n reason:\n 'Entity not listed as required or optional for files with this suffix',\n },\n DATATYPE_MISMATCH: {\n severity: 'error',\n reason:\n 'The datatype directory does not match datatype of found suffix and extension',\n },\n ALL_FILENAME_RULES_HAVE_ISSUES: {\n severity: 'error',\n reason:\n 'Multiple filename rules were found as potential matches. All of them had at least one issue during filename validation.',\n },\n EXTENSION_MISMATCH: {\n severity: 'error',\n reason:\n 'Extension used by file does not match allowed extensions for its suffix',\n },\n JSON_KEY_REQUIRED: {\n severity: 'error',\n reason: \"A data file's JSON sidecar is missing a key listed as required.\",\n },\n JSON_KEY_RECOMMENDED: {\n severity: 'warning',\n reason: 'A data files JSON sidecar is missing a key listed as recommended.',\n },\n TSV_ERROR: {\n severity: 'error',\n reason: 'generic place holder for errors from tsv files',\n },\n TSV_COLUMN_MISSING: {\n severity: 'error',\n reason: 'A required column is missing',\n },\n TSV_COLUMN_ORDER_INCORRECT: {\n severity: 'error',\n reason: 'Some TSV columns are in the incorrect order',\n },\n TSV_ADDITIONAL_COLUMNS_NOT_ALLOWED: {\n severity: 'error',\n reason:\n 'A TSV file has extra columns which are not allowed for its file type',\n },\n TSV_INDEX_VALUE_NOT_UNIQUE: {\n severity: 'error',\n reason:\n 'An index column(s) was specified for the tsv file and not all of the values for it are unique.',\n },\n TSV_VALUE_INCORRECT_TYPE: {\n severity: 'error',\n reason:\n 'A value in a column did match the acceptable type for that column headers specified format.',\n },\n CHECK_ERROR: {\n severity: 'error',\n reason:\n 'generic place holder for errors from failed `checks` evaluated from schema.',\n },\n NOT_INCLUDED: {\n severity: 'error',\n reason:\n 'Files with such naming scheme are not part of BIDS specification. This error is most commonly ' +\n 'caused by typos in file names that make them not BIDS compatible. Please consult the specification and ' +\n 'make sure your files are named correctly. If this is not a file naming issue (for example when including ' +\n 'files not yet covered by the BIDS specification) you should include a \".bidsignore\" file in your dataset (see' +\n ' https://github.com/bids-standard/bids-validator#bidsignore for details). 
Please ' +\n 'note that derived (processed) data should be placed in /derivatives folder and source data (such as DICOMS ' +\n 'or behavioural logs in proprietary formats) should be placed in the /sourcedata folder.',\n },\n EMPTY_FILE: {\n severity: 'error',\n reason: 'Empty files not allowed.',\n },\n}\n\nexport const nonSchemaIssues = { ...filenameIssues }\n" }, { "alpha_fraction": 0.6601455211639404, "alphanum_fraction": 0.6610998511314392, "avg_line_length": 29.819852828979492, "blob_id": "25519d480f83ac03ebc2c3b6f0569ea3b7e977b9", "content_id": "edf0b4054b24c8a7ac738c2510252d3cc387f20b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8383, "license_type": "permissive", "max_line_length": 98, "num_lines": 272, "path": "/bids-validator/validators/bids/fullTest.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import BIDS from './obj'\nimport utils from '../../utils'\nconst Issue = utils.issues.Issue\nimport tsv from '../tsv'\nimport json from '../json'\nimport NIFTI from '../nifti'\nimport bval from '../bval'\nimport bvec from '../bvec'\nimport microscopy from '../microscopy'\nimport Events from '../events'\nimport hed from '../hed'\nimport { session } from '../session'\nimport checkAnyDataPresent from '../checkAnyDataPresent'\nimport headerFields from '../headerFields'\nimport subSesMismatchTest from './subSesMismatchTest'\nimport groupFileTypes from './groupFileTypes'\nimport subjects from './subjects'\nimport checkDatasetDescription from './checkDatasetDescription'\nimport checkReadme from './checkReadme'\nimport validateMisc from '../../utils/files/validateMisc'\nimport collectSubjectMetadata from '../../utils/summary/collectSubjectMetadata'\nimport collectPetFields from '../../utils/summary/collectPetFields'\nimport collectModalities from '../../utils/summary/collectModalities'\n\n/**\n * Full Test\n *\n * Takes on an array of files, callback, and boolean indicating if git-annex is used.\n * Starts the validation process for a BIDS package.\n */\nconst fullTest = (fileList, options, annexed, dir, schema, callback) => {\n const self = BIDS\n self.options = options\n\n const jsonContentsDict = {}\n const bContentsDict = {}\n const events = []\n const stimuli = {\n events: [],\n directory: [],\n }\n const jsonFiles = []\n const headers = []\n const participants = null\n const phenotypeParticipants = []\n\n const tsvs = []\n\n if (self.options.blacklistModalities) {\n const relativePaths = Object.keys(fileList).map(\n (file) => fileList[file].relativePath,\n )\n const preIgnoreModalities = collectModalities(relativePaths)\n self.options.blacklistModalities.map((mod) => {\n if (preIgnoreModalities.primary.includes(mod)) {\n self.issues.push(\n new Issue({\n file: mod,\n evidence: `found ${mod} files`,\n code: 139,\n }),\n )\n }\n })\n }\n\n const summary = utils.collectSummary(fileList, self.options, schema)\n\n // remove size redundancies\n for (const key in fileList) {\n if (fileList.hasOwnProperty(key)) {\n const file = fileList[key]\n if (typeof file.stats === 'object' && file.stats.hasOwnProperty('size'))\n delete file.size\n }\n }\n\n // remove ignored files from list:\n Object.keys(fileList).forEach(function (key) {\n if (fileList[key].ignore) {\n delete fileList[key]\n }\n })\n\n self.issues = self.issues.concat(subSesMismatchTest(fileList))\n\n // check for illegal character in task name and acq name\n self.issues = 
self.issues.concat(utils.files.illegalCharacterTest(fileList))\n\n const files = groupFileTypes(fileList, self.options)\n\n // generate issues for all files that do not comply with\n // bids spec\n files.invalid.map(function (file) {\n self.issues.push(\n new Issue({\n file: file,\n evidence: file.name,\n code: 1,\n }),\n )\n })\n\n // check if dataset contains T1w\n if (!summary.dataTypes.includes('T1w')) {\n self.issues.push(\n new Issue({\n code: 53,\n }),\n )\n }\n\n validateMisc(files.misc)\n .then((miscIssues) => {\n self.issues = self.issues.concat(miscIssues)\n\n // TSV validation\n return tsv.validate(\n files.tsv,\n fileList,\n tsvs,\n events,\n participants,\n phenotypeParticipants,\n stimuli,\n )\n })\n .then(({ tsvIssues, participantsTsvContent }) => {\n self.issues = self.issues.concat(tsvIssues)\n\n // extract metadata on participants to metadata.age and\n // return metadata on each subject from participants.tsv\n summary.subjectMetadata = collectSubjectMetadata(participantsTsvContent)\n // Bvec validation\n return bvec.validate(files.bvec, bContentsDict)\n })\n .then((bvecIssues) => {\n self.issues = self.issues.concat(bvecIssues)\n\n // Bval validation\n return bval.validate(files.bval, bContentsDict)\n })\n .then((bvalIssues) => {\n self.issues = self.issues.concat(bvalIssues)\n\n // Load json files and construct a contents object with field, value pairs\n return json.load(files.json, jsonFiles, jsonContentsDict)\n })\n .then((jsonLoadIssues) => {\n self.issues = self.issues.concat(jsonLoadIssues)\n\n // Check for at least one subject\n const noSubjectIssues = subjects.atLeastOneSubject(fileList)\n self.issues = self.issues.concat(noSubjectIssues)\n\n // Check for datasetDescription file in the proper place\n const datasetDescriptionIssues = checkDatasetDescription(jsonContentsDict)\n self.issues = self.issues.concat(datasetDescriptionIssues)\n\n // Check for README file in the proper place\n const readmeIssues = checkReadme(fileList)\n self.issues = self.issues.concat(readmeIssues)\n\n // Check for microscopy samples file and json files\n if (summary.modalities.includes('Microscopy')) {\n const samplesIssues = microscopy.checkSamples(fileList)\n const jsonAndFieldIssues = microscopy.checkJSONAndField(\n files,\n jsonContentsDict,\n fileList,\n )\n self.issues = self.issues\n .concat(samplesIssues)\n .concat(jsonAndFieldIssues)\n }\n // Validate json files and contents\n return json.validate(jsonFiles, fileList, jsonContentsDict, summary)\n })\n .then((jsonIssues) => {\n self.issues = self.issues.concat(jsonIssues)\n\n // OME-TIFF consistency check\n return microscopy.validate(files.ome, jsonContentsDict)\n })\n .then((omeIssues) => {\n self.issues = self.issues.concat(omeIssues)\n // Nifti validation\n return NIFTI.validate(\n files.nifti,\n fileList,\n self.options,\n jsonContentsDict,\n bContentsDict,\n events,\n headers,\n annexed,\n dir,\n )\n })\n .then((niftiIssues) => {\n self.issues = self.issues.concat(niftiIssues)\n\n // Issues related to participants not listed in the subjects list\n const participantsInSubjectsIssues = subjects.participantsInSubjects(\n participants,\n summary.subjects,\n )\n self.issues = self.issues.concat(participantsInSubjectsIssues)\n\n // Check for equal number of participants from ./phenotype/*.tsv and participants in dataset\n const phenotypeIssues = tsv.checkPhenotype(phenotypeParticipants, summary)\n self.issues = self.issues.concat(phenotypeIssues)\n\n // Validate nii header fields\n self.issues = 
self.issues.concat(headerFields(headers))\n\n // Events validation\n stimuli.directory = files.stimuli\n self.issues = self.issues.concat(\n Events.validateEvents(events, stimuli, headers, jsonContentsDict),\n )\n\n // check the HED strings\n return hed(tsvs, jsonContentsDict, jsonFiles, dir)\n })\n .then((hedIssues) => {\n self.issues = self.issues.concat(hedIssues)\n\n // Validate custom fields in all TSVs and add any issues to the list\n self.issues = self.issues.concat(\n tsv.validateTsvColumns(tsvs, jsonContentsDict, headers),\n )\n // Validate continuous recording files\n self.issues = self.issues.concat(\n tsv.validateContRec(files.contRecord, jsonContentsDict),\n )\n\n if (!options.ignoreSubjectConsistency) {\n // Validate session files\n self.issues = self.issues.concat(session(fileList))\n }\n\n // Determine if each subject has data present\n self.issues = self.issues.concat(\n checkAnyDataPresent(fileList, summary.subjects),\n )\n\n // Group summary modalities\n summary.modalities = utils.modalities.group(summary.modalities)\n\n // collect PET specific fields\n if (summary.modalities.includes('PET'))\n summary.pet = collectPetFields(jsonContentsDict)\n\n // Format issues\n const issues = utils.issues.format(self.issues, summary, self.options)\n callback(issues, summary)\n })\n .catch((err) => {\n // take internal exceptions and push to issues\n // note: exceptions caught here may have skipped subsequent validations\n const issues = utils.issues.exceptionHandler(\n err,\n self.issues,\n summary,\n self.options,\n )\n callback(issues, summary)\n })\n}\n\nexport default fullTest\n" }, { "alpha_fraction": 0.6742424368858337, "alphanum_fraction": 0.6742424368858337, "avg_line_length": 16.600000381469727, "blob_id": "02d760fe435b23142007cb4513c8e8880e546fbe", "content_id": "9f9c9e5f265499c064ecb700c296b3f86ccdafb2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 264, "license_type": "permissive", "max_line_length": 60, "num_lines": 15, "path": "/bids-validator/utils/files/newFile.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import FileAPI from './FileAPI'\n\n/**\n * New File\n *\n * Creates an empty File object\n *\n * @param {string} filename - the filename without path info\n */\nfunction newFile(filename) {\n var File = FileAPI()\n return new File([''], filename)\n}\n\nexport default newFile\n" }, { "alpha_fraction": 0.5705552697181702, "alphanum_fraction": 0.5776872038841248, "avg_line_length": 26.64788818359375, "blob_id": "8c728f08ea21acef0e879b9068a319d953e9a2b2", "content_id": "117bb838301167c152f20c97bd6db2c24433d94b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1963, "license_type": "permissive", "max_line_length": 82, "num_lines": 71, "path": "/bids-validator/utils/files/testFile.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import fs from 'fs'\nimport Issue from '../../utils/issues'\nimport remoteFiles from './remoteFiles'\nimport options from '../../utils/options'\n\n/**\n * Test File\n *\n * Takes a file and callback and tests if it's viable for\n * reading and is larger than 0 kb. 
Calls back with an error and stats if it isn't\n * or null and stats if it is.\n */\nfunction testFile(file, annexed, dir, callback) {\n fs.access(file.path, function (accessErr) {\n if (!accessErr) {\n // accessible\n handleFsAccess(file, callback)\n } else {\n // inaccessible\n fs.lstat(file.path, function (lstatErr, lstats) {\n if (!lstatErr && lstats && lstats.isSymbolicLink()) {\n // symlink\n if (options.getOptions().remoteFiles)\n // only follow symlinks when --remoteFiles option is on\n handleRemoteAccess(file, annexed, dir, callback)\n else\n callback(\n new Issue({\n code: 114,\n file,\n }),\n file.stats,\n )\n } else {\n // inaccessible local file\n callback(new Issue({ code: 44, file: file }), file.stats)\n }\n })\n }\n })\n}\n\nfunction handleFsAccess(file, callback) {\n process.nextTick(function () {\n if (file.stats.size === 0) {\n callback(\n new Issue({\n code: 99,\n file: file,\n reason: `Empty files (${file.path}) not allowed.`,\n }),\n file.stats,\n )\n }\n callback(null, file.stats)\n })\n}\n\nfunction handleRemoteAccess(file, annexed, dir, callback) {\n if (annexed) {\n // Set byte retrieval limits based on file type\n const limit = file.name.includes('.nii') ? 500 : false\n // Call process to get remote files\n // It will call callback with content or error\n remoteFiles.getAnnexedFile(file, dir, limit, callback)\n } else {\n callback(new Issue({ code: 43, file: file }), file.stats)\n }\n}\n\nexport default testFile\n" }, { "alpha_fraction": 0.7137096524238586, "alphanum_fraction": 0.7137096524238586, "avg_line_length": 34.42856979370117, "blob_id": "6ba1bc789101526c8e7a8d0ad8d5cb665e802d63", "content_id": "dfb3e6a8e26c9b063245db42c397bdc6a1d1aa13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 248, "license_type": "permissive", "max_line_length": 79, "num_lines": 7, "path": "/bids-validator/utils/isNode.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const isBrowserWorker = () =>\n // eslint-disable-next-line no-undef\n typeof WorkerGlobalScope !== 'undefined' && self instanceof WorkerGlobalScope\n\nconst isNode = () => typeof window === 'undefined' && !isBrowserWorker()\n\nexport default isNode()\n" }, { "alpha_fraction": 0.6253968477249146, "alphanum_fraction": 0.6730158925056458, "avg_line_length": 21.5, "blob_id": "056a0d35d9d5f402f21d5c4531fa11c1ee68e16f", "content_id": "4c1c6a056b3738fd902ad1545c9ccce73c158691", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 315, "license_type": "permissive", "max_line_length": 75, "num_lines": 14, "path": "/bids-validator/src/deps/logger.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export {\n Logger,\n LogLevels,\n error,\n critical,\n debug,\n info,\n setup,\n warning,\n handlers,\n getLogger,\n} from 'https://deno.land/[email protected]/log/mod.ts'\nexport { LogLevelNames } from 'https://deno.land/[email protected]/log/levels.ts'\nexport type { LevelName } from 'https://deno.land/[email protected]/log/mod.ts'\n" }, { "alpha_fraction": 0.6896551847457886, "alphanum_fraction": 0.6931034326553345, "avg_line_length": 31.22222137451172, "blob_id": "9fce2c0b9893a9719595d0e5be9875535e9458ee", "content_id": "878e148a1b2e8ee7a81472234da70fd254c3114f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 290, "license_type": "permissive", "max_line_length": 
66, "num_lines": 9, "path": "/bids-validator/src/validators/internal/emptyFile.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { CheckFunction } from '../../types/check.ts'\n\n// Non-schema EMPTY_FILE implementation\nexport const emptyFile: CheckFunction = (schema, context) => {\n if (context.file.size === 0) {\n context.issues.addNonSchemaIssue('EMPTY_FILE', [context.file])\n }\n return Promise.resolve()\n}\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 29, "blob_id": "42f8aefca912cd0c32a1bb1789c9427c379f99dd", "content_id": "d5677fadb498d4d76ca6eae76e758e9d3612b7e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 30, "license_type": "permissive", "max_line_length": 29, "num_lines": 1, "path": "/bids-validator/src/tests/README.md", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "Support files for test suite.\n" }, { "alpha_fraction": 0.510948896408081, "alphanum_fraction": 0.5133820176124573, "avg_line_length": 26.399999618530273, "blob_id": "ef09c5d65cf61a86c317677d27cae44020f27472", "content_id": "c383f7e4853f7f8f4c5c15209dbfc1e85d60cad8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1233, "license_type": "permissive", "max_line_length": 80, "num_lines": 45, "path": "/bids-validator/utils/files/__tests__/readDir.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import readDir from '../readDir.js'\n\ndescribe('readDir.js', () => {\n describe('fileArrayToObject', () => {\n it('transforms an array to an object', () => {\n expect(\n readDir.fileArrayToObject([\n { name: 'one' },\n { name: 'two' },\n { name: 'three' },\n ]),\n ).toEqual({\n 0: {\n name: 'one',\n },\n 1: {\n name: 'two',\n },\n 2: {\n name: 'three',\n },\n })\n })\n })\n describe('harmonizeRelativePath', () => {\n it('harmonizes a basic POSIX path', () => {\n expect(readDir.harmonizeRelativePath('test/a/path')).toEqual('/a/path')\n })\n it('does not mangle absolute Windows paths', () => {\n expect(readDir.harmonizeRelativePath('C:\\\\dataset\\\\directory')).toEqual(\n '/dataset/directory',\n )\n })\n it('does not mangle relative Windows paths', () => {\n expect(readDir.harmonizeRelativePath('dataset\\\\directory')).toEqual(\n '/directory',\n )\n })\n it('does not mangle relative Windows paths with parent directories', () => {\n expect(\n readDir.harmonizeRelativePath('..\\\\..\\\\dataset\\\\directory'),\n ).toEqual('/../dataset/directory')\n })\n })\n})\n" }, { "alpha_fraction": 0.6057945489883423, "alphanum_fraction": 0.6215978860855103, "avg_line_length": 31.542856216430664, "blob_id": "8ad8e98adf3589b18bea14a12880e590a2f92f21", "content_id": "b9ecf92430ece080fdef33b170611ac260ff8b36", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1139, "license_type": "permissive", "max_line_length": 101, "num_lines": 35, "path": "/bids-validator/validators/nifti/__tests__/duplicateFiles.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport duplicateNiftis from '../duplicateFiles'\n\ndescribe('duplicateFiles', () => {\n describe('duplicateNiftis()', () => {\n const file1nii = { name: 'file1.nii' }\n const file1gz = { name: 'file1.nii.gz' }\n const file2nii = { name: 'file2.nii' }\n const 
file2gz = { name: 'file2.nii.gz' }\n\n it('throws no issues if there are no nifti files', () => {\n const files = []\n const issues = duplicateNiftis(files)\n assert.lengthOf(issues, 0)\n })\n\n it('allows nifti files with distinct names and extensions', () => {\n const files = [file1nii, file2gz]\n const issues = duplicateNiftis(files)\n assert.lengthOf(issues, 0)\n })\n\n it('allows nifti files with distinct names and the same extension', () => {\n const files = [file1nii, file2nii]\n const issues = duplicateNiftis(files)\n assert.lengthOf(issues, 0)\n })\n\n it('throws an error if a the same filename with .nii and .nii.gz extensions are present', () => {\n const files = [file1gz, file1nii]\n const issues = duplicateNiftis(files)\n assert.lengthOf(issues, 2)\n })\n })\n})\n" }, { "alpha_fraction": 0.6447457075119019, "alphanum_fraction": 0.6466502547264099, "avg_line_length": 35.88429641723633, "blob_id": "6269a8036141bc34e328a2b07498b9edf68f4afd", "content_id": "a309f6a74469bde43735fa6337574a0ec138cba6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 8926, "license_type": "permissive", "max_line_length": 123, "num_lines": 242, "path": "/bids-validator/utils/files/__tests__/remoteFiles.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport remoteFiles from '../remoteFiles'\nimport fs from 'fs'\nimport zlib from 'zlib'\nconst config = {\n s3Params: {\n Bucket: 'none',\n },\n file: {\n name: 'something',\n },\n}\n\ndescribe('remoteFiles', () => {\n beforeAll(() => {\n // fetch mock\n global.fetch = jest\n .fn()\n .mockImplementation(() =>\n Promise.resolve({ ok: true, buffer: () => 'buffer' }),\n )\n })\n\n beforeEach(() => {\n delete process.env.AWS_ACCESS_KEY_ID\n })\n\n describe('accessRemoteFile', () => {\n it('should return a promise', () => {\n const promise = remoteFiles.accessRemoteFile(config)\n expect(promise).toBeInstanceOf(Promise)\n })\n it('should return the response of constructAwsRequest if successful', () => {\n remoteFiles\n .accessRemoteFile(config)\n .then((res) => expect(res).toBe('buffer'))\n })\n it('should return the issue of extractGzipBuffer if unzip is unsuccessful', () => {\n config.file.name = 'something.gz'\n return remoteFiles.accessRemoteFile(config).catch((issue) => {\n expect(issue).toHaveProperty('code')\n config.file.name = 'something'\n })\n })\n })\n\n describe('constructAwsRequest', () => {\n it('should return a fetch resolution promise when aws creds are not present', async () => {\n const response = remoteFiles.constructAwsRequest({\n s3Params: { Bucket: 'matters not' },\n })\n expect(response).toBeInstanceOf(Promise)\n })\n it('should return the buffer() property of the fetch response', async () => {\n remoteFiles\n .constructAwsRequest({\n s3Params: { Bucket: 'matters not' },\n })\n .then((data) => {\n assert.equal(data, 'buffer')\n })\n })\n })\n\n describe('extractGzipBuffer', () => {\n it('should extract proper gzip files', async () => {\n zlib.gzip('Some String', async (err, res) => {\n const gzip = await remoteFiles.extractGzipBuffer(res, {})\n expect(gzip).toBeInstanceOf(Uint8Array)\n })\n })\n it('should reject with an issue when gzip reading fails', async () => {\n try {\n const zip = 'bad data'\n await remoteFiles.extractGzipBuffer(zip, {})\n } catch (e) {\n expect(e).toHaveProperty('code')\n expect(e.code).toEqual(28)\n }\n })\n })\n\n describe('callGitAnnex', () => {\n it('should return the string 
result of execSync', () => {\n const resp = remoteFiles.callGitAnnex('echo test')\n expect(resp.trim()).toBe('test')\n })\n })\n\n describe('getRemotesInfo', () => {\n it('should return an empty array if callGitAnnex does not return contents of a metadata file', () => {\n remoteFiles.callGitAnnex = jest.fn()\n remoteFiles.callGitAnnex.mockReturnValue('bad_response')\n const remotesInfo = remoteFiles.getRemotesInfo('some_directory', {\n relativePath: 'some_file',\n })\n assert.lengthOf(remotesInfo, 0)\n })\n it('should return an empty array if file is not properly formatted', () => {\n const remotesInfo = remoteFiles.getRemotesInfo('some_directory', {})\n assert.lengthOf(remotesInfo, 0)\n })\n it('should return an empty array if directory is not properly formatted', () => {\n const remotesInfo = remoteFiles.getRemotesInfo('bad directory', {\n relativePath: 'some_path',\n })\n assert.lengthOf(remotesInfo, 0)\n })\n it('should return an array of remote objects if getRemoteData returns properly formatted remote metadata file', () => {\n remoteFiles.getRemoteMetadata = jest.fn()\n remoteFiles.getRemoteMetadata.mockReturnValue(\n 'timestamp remoteuuid:commitinfo xversionId#fileName',\n )\n const remotesInfo = remoteFiles.getRemotesInfo('some_directory', {\n relativePath: 'some_file',\n })\n remoteFiles.getRemoteMetadata.mockRestore()\n assert.lengthOf(remotesInfo, 1)\n })\n })\n\n describe('getSingleRemoteInfo', () => {\n it('returns an object with null Bucket property if the response does not contain remote info', () => {\n remoteFiles.callGitAnnex = jest.fn()\n remoteFiles.callGitAnnex.mockReturnValue('bad_response')\n const singleRemoteInfo = remoteFiles.getSingleRemoteInfo(\n 'some_dir',\n 'some_uuid',\n )\n expect(singleRemoteInfo).toHaveProperty('Bucket')\n expect(singleRemoteInfo.Bucket).toBe(null)\n })\n it('returns an object with a Bucket property if callGitAnnex returns an object with the Bucket field', () => {\n remoteFiles.callGitAnnex = jest.fn()\n remoteFiles.callGitAnnex.mockReturnValue(\n 'good_response\\nbucket: such_bucket\\nawesome_line',\n )\n const singleRemoteInfo = remoteFiles.getSingleRemoteInfo(\n 'some_dir',\n 'some_uuid',\n )\n expect(singleRemoteInfo).toHaveProperty('Bucket')\n expect(singleRemoteInfo.Bucket).toEqual('such_bucket')\n })\n })\n\n describe('getRemoteBucket', () => {\n it('returns an object with a Bucket property if the response contains that field', () => {\n const resp = 'something:something\\nbucket: omg\\nawesome:awesome'\n const params = remoteFiles.getRemoteBucket(resp)\n expect(params).toHaveProperty('Bucket')\n expect(params.Bucket).toEqual('omg')\n })\n it('returns an object with null Bucket property if the response does not contain the bucket field', () => {\n const resp = 'wow_this_is_a_bad_response'\n const params = remoteFiles.getRemoteBucket(resp)\n expect(params).toHaveProperty('Bucket')\n expect(params.Bucket).toBe(null)\n })\n })\n\n describe('processRemoteMetadata', () => {\n it('properly parses a git-annex remote metadata file', () => {\n const resp = 'timestamp remoteuuid:commitinfo xversionId#fileName'\n const remotesInfo = remoteFiles.processRemoteMetadata(resp)\n assert.lengthOf(remotesInfo, 1)\n const remoteObj = remotesInfo[0]\n expect(remoteObj).toHaveProperty('timestamp')\n expect(remoteObj.timestamp).toEqual('timestamp')\n expect(remoteObj).toHaveProperty('remoteUuid')\n expect(remoteObj.remoteUuid).toEqual('remoteuuid')\n expect(remoteObj).toHaveProperty('fileName')\n expect(remoteObj.fileName).toEqual('fileName')\n 
expect(remoteObj).toHaveProperty('versionId')\n expect(remoteObj.versionId).toEqual('versionId')\n })\n it('returns an empty array if there is an improperly formatted metadata file', () => {\n let remotesInfo\n const no_spaces = 'poorly_formatted_response' // contains no spaces\n remotesInfo = remoteFiles.processRemoteMetadata(no_spaces)\n assert.lengthOf(remotesInfo, 0)\n const not_enough_items = 'one two' // does not contain enough \"columns\"\n remotesInfo = remoteFiles.processRemoteMetadata(not_enough_items)\n assert.lengthOf(remotesInfo, 0)\n\n // does not have the properly one two:three xfour#five format\n const not_properly_formatted = 'one two:three four'\n remotesInfo = remoteFiles.processRemoteMetadata(not_properly_formatted)\n assert.lengthOf(remotesInfo, 0)\n const not_the_right_separators = 'one two:three xfour:five'\n remotesInfo = remoteFiles.processRemoteMetadata(not_the_right_separators)\n assert.lengthOf(remotesInfo, 0)\n })\n it('returns objects corresponding to any properly formatted line', () => {\n const one_line_right =\n 'properly formatted:response xwith#a\\nline_that_is_not_properly_formatted'\n const remotesInfo = remoteFiles.processRemoteMetadata(one_line_right)\n assert.lengthOf(remotesInfo, 1)\n })\n })\n\n describe('isGitAnnex', () => {\n it('returns false when fs.existsSync returns false', () => {\n fs.existsSync = jest.fn()\n fs.existsSync.mockReturnValue(false)\n const isGitAnnex = remoteFiles.isGitAnnex('some-path')\n expect(fs.existsSync).toHaveBeenCalled()\n expect(isGitAnnex).toBe(false)\n })\n it('returns true when fs.existsSync returns true', () => {\n fs.existsSync = jest.fn()\n fs.existsSync.mockReturnValue(true)\n const isGitAnnex = remoteFiles.isGitAnnex('some-path')\n expect(fs.existsSync).toHaveBeenCalled()\n expect(isGitAnnex).toBe(true)\n })\n })\n\n describe('tryRemote', () => {\n it('should resolve with the results of accessRemoteFile', (done) => {\n remoteFiles.getSingleRemoteInfo = jest.fn()\n remoteFiles.getSingleRemoteInfo.mockReturnValue({ Bucket: 'wow' })\n remoteFiles.accessRemoteFile = jest.fn()\n remoteFiles.accessRemoteFile.mockReturnValue(Promise.resolve('data'))\n remoteFiles\n .tryRemote(\n {},\n { dir: 'directory', file: { relativePath: 'wow', name: 'name' } },\n )\n .then((data) => {\n expect(data)\n done()\n })\n .catch(done)\n })\n })\n // reset the fs object back to its normal state\n // so we dont break jest\n afterAll(() => {\n fs.existsSync.mockRestore()\n })\n})\n" }, { "alpha_fraction": 0.7829861044883728, "alphanum_fraction": 0.796875, "avg_line_length": 29.3157901763916, "blob_id": "b6c3d2c882b5cfe4350a0f013b3a6abda45f26c9", "content_id": "8135da0ea8f03ee2e942d2d1f205e02629ef3cf5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 576, "license_type": "permissive", "max_line_length": 54, "num_lines": 19, "path": "/bids-validator/validators/tsv/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/* eslint-disable no-unused-vars */\nimport TSV from './tsv'\n\nimport checkPhenotype from './checkPhenotype'\nimport validateTsvColumns from './validateTsvColumns'\nimport validate from './validate'\nimport checkAge89 from './checkAge89'\nimport checkAcqTimeFormat from './checkAcqTimeFormat'\nimport validateContRec from './validateContRecordings'\n\nexport default {\n TSV: TSV,\n checkPhenotype: checkPhenotype,\n validateTsvColumns: validateTsvColumns,\n validate: validate,\n checkAge89: checkAge89,\n checkAcqTimeFormat: 
checkAcqTimeFormat,\n validateContRec: validateContRec,\n}\n" }, { "alpha_fraction": 0.6206434369087219, "alphanum_fraction": 0.6318141222000122, "avg_line_length": 32.402984619140625, "blob_id": "a2cf53c30b6e9cb09f19fe80f04eb0ba2fedfa6f", "content_id": "6e63bf7e95a56b60c13a2096f3001450b6074674", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2238, "license_type": "permissive", "max_line_length": 78, "num_lines": 67, "path": "/bids-validator/src/files/browser.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { FileIgnoreRules } from './ignore.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { assertEquals } from '../deps/asserts.ts'\nimport { BIDSFileBrowser, fileListToTree } from './browser.ts'\n\nclass TestFile extends File {\n webkitRelativePath: string\n constructor(\n fileBits: BlobPart[],\n fileName: string,\n webkitRelativePath: string,\n options?: FilePropertyBag | undefined,\n ) {\n super(fileBits, fileName, options)\n this.webkitRelativePath = webkitRelativePath\n }\n}\n\nDeno.test('Browser implementation of FileTree', async (t) => {\n await t.step('converts a basic FileList', async () => {\n const ignore = new FileIgnoreRules([])\n const files = [\n new TestFile(\n ['{}'],\n 'dataset_description.json',\n 'dataset_description.json',\n ),\n new TestFile(['flat test dataset'], 'README.md', 'README.md'),\n ]\n const tree = await fileListToTree(files)\n const expectedTree = new FileTree('', '/', undefined)\n expectedTree.files = files.map((f) => new BIDSFileBrowser(f, ignore))\n assertEquals(tree, expectedTree)\n })\n await t.step('converts a simple FileList with several levels', async () => {\n const ignore = new FileIgnoreRules([])\n const files = [\n new TestFile(\n ['{}'],\n 'dataset_description.json',\n 'dataset_description.json',\n ),\n new TestFile(\n ['tsv headers\\n', 'column\\tdata'],\n 'participants.tsv',\n 'participants.tsv',\n ),\n new TestFile(['single subject test dataset'], 'README.md', 'README.md'),\n new TestFile(\n ['nifti file goes here'],\n 'sub-01_T1w.nii.gz',\n 'sub-01/anat/sub-01_T1w.nii.gz',\n ),\n ]\n const tree = await fileListToTree(files)\n const expectedTree = new FileTree('', '/', undefined)\n const sub01Tree = new FileTree('sub-01', 'sub-01', expectedTree)\n const anatTree = new FileTree('sub-01/anat', 'anat', sub01Tree)\n expectedTree.files = files\n .slice(0, 3)\n .map((f) => new BIDSFileBrowser(f, ignore))\n expectedTree.directories.push(sub01Tree)\n anatTree.files = [new BIDSFileBrowser(files[3], ignore)]\n sub01Tree.directories.push(anatTree)\n assertEquals(tree, expectedTree)\n })\n})\n" }, { "alpha_fraction": 0.6019341349601746, "alphanum_fraction": 0.6063774228096008, "avg_line_length": 27.340740203857422, "blob_id": "592ec87406869cf9b034b7d0b6bec5b6866402c8", "content_id": "2072abbe3c7945554a1b1cee5bdeb6732bf0285b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3826, "license_type": "permissive", "max_line_length": 91, "num_lines": 135, "path": "/bids-validator/validators/microscopy/checkJSONAndField.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\n\nconst checkJSONAndField = (files, jsonContentsDict, fileList) => {\n let issues = []\n if (files.ome) {\n files.ome.forEach((file) => {\n let possibleJsonPath = file.relativePath\n .replace('.tif', 
'')\n .replace('.btf', '')\n .replace('.ome', '.json')\n issues = issues.concat(\n ifJsonExist(file, possibleJsonPath, jsonContentsDict, fileList),\n )\n })\n }\n if (files.png) {\n files.png.forEach((file) => {\n let possibleJsonPath = file.relativePath.replace('.png', '.json')\n issues = issues.concat(\n ifJsonExist(file, possibleJsonPath, jsonContentsDict, fileList),\n )\n })\n }\n if (files.tif) {\n files.tif.forEach((file) => {\n let possibleJsonPath = file.relativePath.replace('.tif', '.json')\n issues = issues.concat(\n ifJsonExist(file, possibleJsonPath, jsonContentsDict, fileList),\n )\n })\n }\n if (files.jpg) {\n files.jpg.forEach((file) => {\n let possibleJsonPath = file.relativePath.replace('.jpg', '.json')\n issues = issues.concat(\n ifJsonExist(file, possibleJsonPath, jsonContentsDict, fileList),\n )\n })\n }\n return issues\n}\n\nconst ifJsonExist = (file, possibleJsonPath, jsonContentsDict, fileList) => {\n let potentialSidecars = utils.files.potentialLocations(possibleJsonPath)\n const chunkRegex = new RegExp('_chunk-[0-9]+')\n\n const jsonChunkFiles = potentialSidecars.filter(\n (name) => jsonContentsDict.hasOwnProperty(name) && chunkRegex.exec(name),\n )\n const chunkPresent =\n jsonChunkFiles.length || chunkRegex.exec(file.relativePath)\n\n const mergedDictionary = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n\n if (utils.type.file.isMicroscopyPhoto(file.relativePath)) {\n if (mergedDictionary.hasOwnProperty('IntendedFor')) {\n const intendedFor =\n typeof mergedDictionary['IntendedFor'] == 'string'\n ? [mergedDictionary['IntendedFor']]\n : mergedDictionary['IntendedFor']\n return checkIfIntendedExists(intendedFor, fileList, file)\n }\n } else {\n // check if the given file has a corresponding JSON file\n if (Object.keys(mergedDictionary).length === 0) {\n return [\n new Issue({\n file: file,\n code: 225,\n }),\n ]\n }\n\n if (chunkPresent) {\n return checkMatrixField(file, mergedDictionary)\n }\n }\n return []\n}\n\nconst checkMatrixField = (file, mergedDictionary) => {\n let issues = []\n if (!mergedDictionary.hasOwnProperty('ChunkTransformationMatrix')) {\n issues.push(\n new Issue({\n file: file,\n code: 223,\n }),\n )\n }\n return issues\n}\n\nconst checkIfIntendedExists = (intendedFor, fileList, file) => {\n let issues = []\n for (let key = 0; key < intendedFor.length; key++) {\n const intendedForFile = intendedFor[key]\n const intendedForFileFull =\n '/' + file.relativePath.split('/')[1] + '/' + intendedForFile\n let onTheList = false\n for (let key2 in fileList) {\n if (key2) {\n const filePath = fileList[key2].relativePath\n if (filePath === intendedForFileFull) {\n onTheList = true\n }\n }\n }\n if (!onTheList) {\n issues.push(\n new Issue({\n file: file,\n code: 37,\n reason:\n \"'IntendedFor' property of this photo ('\" +\n file.relativePath +\n \"') does not point to an existing file ('\" +\n intendedForFile +\n \"'). 
Please mind that this value should not include subject level directory \" +\n \"('/\" +\n file.relativePath.split('/')[1] +\n \"/').\",\n evidence: intendedForFile,\n }),\n )\n }\n }\n return issues\n}\n\nexport default checkJSONAndField\n" }, { "alpha_fraction": 0.6786248087882996, "alphanum_fraction": 0.6786248087882996, "avg_line_length": 19.06999969482422, "blob_id": "13a2922a9c32e2f46299de3cea6c77a79296601a", "content_id": "1568a36d37f80cb061e7179181ee64440b2df944", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2007, "license_type": "permissive", "max_line_length": 85, "num_lines": 100, "path": "/bids-validator/src/types/issues.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { BIDSFile } from './file.ts'\n\nexport type Severity = 'warning' | 'error' | 'ignore'\n\nexport interface IssueFileDetail {\n name: string\n path: string\n relativePath: string\n}\n\nexport interface IssueFileOutput {\n key: string\n code: number\n file: IssueFileDetail\n evidence: string\n line: number\n character: number\n severity: Severity\n reason: string\n helpUrl: string\n}\n\n/**\n * Dataset issue, derived from OpenNeuro schema and existing validator implementation\n */\nexport interface IssueOutput {\n severity: Severity\n key: string\n code: number\n reason: string\n files: IssueFileOutput[]\n additionalFileCount: number\n helpUrl: string\n}\n\n/**\n * Shape returned by fullTest call in non-schema validator\n */\nexport interface FullTestIssuesReturn {\n errors: IssueOutput[]\n warnings: IssueOutput[]\n}\n\n/**\n * For defining internal issues quickly\n */\nexport interface IssueDefinition {\n severity: Severity\n reason: string\n}\nexport type IssueDefinitionRecord = Record<string, IssueDefinition>\n\n/**\n * File allowing extra context for the issue found\n */\nexport type IssueFile = Omit<BIDSFile, 'readBytes'> & {\n evidence?: string\n line?: number\n character?: number\n}\n\n/**\n * Updated internal Issue structure for schema based validation\n */\nexport class Issue {\n key: string\n severity: Severity\n reason: string\n files: Map<string, IssueFile>\n\n constructor({\n key,\n severity,\n reason,\n files,\n }: {\n key: string\n severity: Severity\n reason: string\n files: Map<string, IssueFile> | IssueFile[]\n }) {\n this.key = key\n this.severity = severity\n this.reason = reason\n // We want to be able to easily look up by path, so turn IssueFile[] into a Map\n if (Array.isArray(files)) {\n this.files = new Map()\n for (const f of files) {\n this.files.set(f.path, f)\n }\n } else {\n this.files = files\n }\n }\n\n get helpUrl(): string {\n // Provide a link to NeuroStars\n return `https://neurostars.org/search?q=${this.key}`\n }\n}\n" }, { "alpha_fraction": 0.5654114484786987, "alphanum_fraction": 0.5790795087814331, "avg_line_length": 17.86842155456543, "blob_id": "6359478f0fd2c92c2f08f229aafcb0631cd7f537", "content_id": "1f13348dbff4747a19ba0bc114f0088d526d36cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3591, "license_type": "permissive", "max_line_length": 104, "num_lines": 190, "path": "/bids-validator/utils/unit.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const roots = [\n 'metre',\n 'm',\n 'kilogram',\n 'kg',\n 'second',\n 's',\n 'ampere',\n 'A',\n 'kelvin',\n 'K',\n 'mole',\n 'mol',\n 'candela',\n 'cd',\n 'radian',\n 'rad',\n 'steradian',\n 'sr',\n 'hertz',\n 
'Hz',\n 'newton',\n 'N',\n 'pascal',\n 'Pa',\n 'joule',\n 'J',\n 'watt',\n 'W',\n 'coulomb',\n 'C',\n 'volt',\n 'V',\n 'farad',\n 'F',\n 'ohm',\n 'Ω',\n 'siemens',\n 'S',\n 'weber',\n 'Wb',\n 'tesla',\n 'T',\n 'henry',\n 'H',\n 'degree',\n 'Celsius',\n '°C',\n 'lumen',\n 'lm',\n 'lux',\n 'lx',\n 'becquerel',\n 'Bq',\n 'gray',\n 'Gy',\n 'sievert',\n 'Sv',\n 'katal',\n 'kat',\n]\nconst prefixes = [\n // multiples\n 'deca',\n 'da',\n 'hecto',\n 'h',\n 'kilo',\n 'k',\n 'mega',\n 'M',\n 'giga',\n 'G',\n 'tera',\n 'T',\n 'peta',\n 'P',\n 'exa',\n 'E',\n 'zetta',\n 'Z',\n 'yotta',\n 'Y',\n // sub-multiples\n 'deci',\n 'd',\n 'centi',\n 'c',\n 'milli',\n 'm',\n 'micro',\n 'µ',\n 'nano',\n 'n',\n 'pico',\n 'p',\n 'femto',\n 'f',\n 'atto',\n 'a',\n 'zepto',\n 'z',\n 'yocto',\n 'y',\n]\nconst unitOperators = ['/', '*', '⋅']\nconst exponentOperator = '^'\nconst operators = [...unitOperators, exponentOperator]\n\n// from 0-9\nconst superscriptNumbers = [\n '\\u2070',\n '\\u00B9',\n '\\u00B2',\n '\\u00B3',\n '\\u2074',\n '\\u2075',\n '\\u2076',\n '\\u2077',\n '\\u2078',\n '\\u2079',\n]\nconst superscriptNegative = '\\u207B'\n\nconst start = '^'\nconst end = '$'\nconst prefix = `(${prefixes.join('|')})?`\nconst root = `(${roots.join('|')})`\nconst superscriptExp = `(${superscriptNegative}?[${superscriptNumbers}]+)?`\nconst operatorExp = `(\\\\^-?[0-9]+)?`\nconst unitWithExponentPattern = new RegExp(\n `${start}${prefix}${root}(${superscriptExp}|${operatorExp})${end}`,\n)\n\nconst unitOperatorPattern = new RegExp(`[${unitOperators.join('')}]`)\n\nconst isUnavailable = (unit) => unit.trim().toLowerCase() === 'n/a'\nconst isPercent = (unit) => unit.trim() === '%'\n\n/* Validate currently not used, out of line with specification:\n * https://github.com/bids-standard/bids-specification/pull/411\n * Once updated to use cmixf uncomment section in tsv validator that\n * calls this function, remove this comment, and uncomment test in tests/tsv.spec.js\n */\n/**\n * validate\n *\n * Checks that the SI unit given is valid.\n * Whitespace characters are not supported.\n * Unit must include at least one root unit of measuremnt.\n * Multiple root units must be separated by one of the operators '/' (division) or '*' (multiplication).\n * Each root unit may or may not pre preceded by a multiplier prefix,\n * and may or may not be followed by an exponent.\n * Exponents may only be to integer powers,\n * and may be formatted as either unicode superscript numbers,\n * or as integers following the '^' operator.\n *\n * @param {string} derivedUnit - a simple or complex SI unit\n * @returns {object} - { isValid, evidence }\n */\nconst validate = (derivedUnit) => {\n if (isUnavailable(derivedUnit) || isPercent(derivedUnit)) {\n return { isValid: true, evidence: '' }\n } else {\n const separatedUnits = derivedUnit\n .split(unitOperatorPattern)\n .map((str) => str.trim())\n const invalidUnits = separatedUnits.filter(\n (unit) => !unitWithExponentPattern.test(unit),\n )\n\n const isValid = invalidUnits.length === 0\n const evidence = isValid\n ? ''\n : `Subunit${invalidUnits.length === 1 ? '' : 's'} (${invalidUnits.join(\n ', ',\n )}) of unit ${derivedUnit} is invalid. 
`\n\n return { isValid, evidence }\n }\n}\n\nexport { roots, prefixes, superscriptNumbers, operators, validate }\nexport default {\n roots,\n prefixes,\n superscriptNumbers,\n operators,\n validate,\n}\n" }, { "alpha_fraction": 0.6132596731185913, "alphanum_fraction": 0.6574585437774658, "avg_line_length": 19.11111068725586, "blob_id": "c7ac98f4de77a60320a9eb179c7f78fdead4f1d7", "content_id": "a1199c5207b9ecce37bf71d46ccf443bd579037e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 181, "license_type": "permissive", "max_line_length": 58, "num_lines": 9, "path": "/bids-validator/src/deps/cliffy.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export {\n Cell,\n Row,\n Table,\n} from 'https://deno.land/x/[email protected]/table/mod.ts'\nexport {\n Command,\n EnumType,\n} from 'https://deno.land/x/[email protected]/command/mod.ts'\n" }, { "alpha_fraction": 0.40468037128448486, "alphanum_fraction": 0.41152969002723694, "avg_line_length": 28.694915771484375, "blob_id": "2d01cc6634332fea9f83746e37aceaa133b93be7", "content_id": "ecb6532067d8f886ef7d3b0ed8ae215364b478c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1752, "license_type": "permissive", "max_line_length": 80, "num_lines": 59, "path": "/bids-validator-web/components/Summary.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// dependencies -------------------------------------------------------\n\nimport React from 'react'\nimport pluralize from 'pluralize'\nimport bytes from 'bytes'\nimport List from './List'\n\nclass Summary extends React.Component {\n // life cycle events --------------------------------------------------\n\n render() {\n let summary = this.props.summary\n if (summary) {\n var numSessions =\n summary.sessions.length > 0 ? 
summary.sessions.length : 1\n return (\n <div>\n <h3>{this.props.dirName}</h3>\n <div className=\"card container my-3\">\n <div className=\"card-header row summary p-4\">\n <div className=\"col-sm\">\n <h5>Summary</h5>\n <ul>\n <li>\n {summary.totalFiles} {pluralize('File', summary.totalFiles)}\n , {bytes(summary.size)}\n </li>\n <li>\n {summary.subjects.length} -{' '}\n {pluralize('Subject', summary.subjects.length)}\n </li>\n <li>\n {numSessions} - {pluralize('Session', numSessions)}\n </li>\n </ul>\n </div>\n <div className=\"col-sm\">\n <h5>Available Tasks</h5>\n <ul>\n <List items={summary.tasks} />\n </ul>\n </div>\n <div className=\"col-sm\">\n <h5>Available Modalities</h5>\n <ul>\n <List items={summary.modalities} />\n </ul>\n </div>\n </div>\n </div>\n </div>\n )\n } else {\n return null\n }\n }\n}\n\nexport default Summary\n" }, { "alpha_fraction": 0.48610755801200867, "alphanum_fraction": 0.4899311661720276, "avg_line_length": 25.687074661254883, "blob_id": "b6b55f71c03173989150afa4061295bbb31f6b80", "content_id": "7d2c5cec0c6151dc87e2e875286e490259433b28", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3923, "license_type": "permissive", "max_line_length": 79, "num_lines": 147, "path": "/bids-validator-web/components/results/Issues.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// dependencies -----------------------------------------------------------\n\nimport React from 'react'\nimport PropTypes from 'prop-types'\nimport { Card, Collapse } from 'react-bootstrap'\nimport pluralize from 'pluralize'\nimport Issue from './Issue'\n\n// component setup --------------------------------------------------------\n\nclass Issues extends React.Component {\n constructor() {\n super()\n this.state = {\n showMore: [],\n errorOpen: false,\n }\n }\n\n // life cycle events ------------------------------------------------------\n\n render() {\n let self = this\n let issueFiles = this.props.issues\n let issues = issueFiles.map((issue, index) => {\n let files = issue.files\n if (this.state.showMore.indexOf(index) === -1) {\n files = issue.files.slice(0, 10)\n }\n\n // issue sub-errors\n let hasErrorFiles = false\n let subErrors = files.map(function (error, index2) {\n if (error && error.file) {\n hasErrorFiles = true\n return (\n <Issue\n type={self.props.issueType}\n error={error}\n index={index2}\n key={index2}\n />\n )\n }\n })\n\n // issue card\n if (hasErrorFiles) {\n return (\n <Card className=\"validation-error fadeIn\" key={index}>\n <Card.Header\n className=\"error-header\"\n aria-expanded={this.state.errorOpen}\n aria-controls={'error_' + index}\n onClick={() =>\n this.setState({ errorOpen: !this.state.errorOpen })\n }>\n {this._header(issue, index, this.props.issueType, hasErrorFiles)}\n </Card.Header>\n <Collapse in={this.state.errorOpen}>\n <Card.Body id={'error_' + index}>\n {subErrors}\n {this._viewMore(issue.files, index)}\n </Card.Body>\n </Collapse>\n </Card>\n )\n } else {\n return (\n <div className=\"panel panel-default\" key={index}>\n <div className=\"panel-heading\">\n {this._header(issue, index, this.props.issueType, hasErrorFiles)}\n </div>\n </div>\n )\n }\n })\n return <div>{issues}</div>\n }\n\n // template methods -------------------------------------------------------\n\n _header(issue, index, type, hasErrorFiles) {\n let issueCount = pluralize('files', issue.files.length)\n let fileCount\n if (hasErrorFiles) {\n fileCount = (\n <span 
className=\"pull-right\">\n {issue.files.length} {issueCount}\n </span>\n )\n }\n return (\n <span className=\"panel-title file-header\">\n <h4 className=\"em-header clearfix\">\n <strong className=\"em-header pull-left\">\n {type} {index + 1}: [Code {issue.code}] {issue.key}\n </strong>\n </h4>\n {this._issueLink(issue)}\n {issue.reason}\n {fileCount}\n </span>\n )\n }\n\n _issueLink(issue) {\n if (issue && issue.helpUrl) {\n return (\n <p>\n <a target=\"_blank\" href={issue.helpUrl}>\n Click here for more information about this issue\n </a>\n </p>\n )\n } else {\n return null\n }\n }\n\n _viewMore(files, index) {\n if (this.state.showMore.indexOf(index) === -1 && files.length > 10) {\n return (\n <div\n className=\"issues-view-more\"\n onClick={this._showMore.bind(this, index)}>\n <button>View {files.length - 10} more files</button>\n </div>\n )\n }\n }\n\n // custom methods ---------------------------------------------------------\n\n _showMore(index) {\n let showMore = this.state.showMore\n showMore.push(index)\n this.setState({ showMore })\n }\n}\n\nIssues.propTypes = {\n issues: PropTypes.array.isRequired,\n issueType: PropTypes.string.isRequired,\n}\n\nexport default Issues\n" }, { "alpha_fraction": 0.6317154765129089, "alphanum_fraction": 0.6326417326927185, "avg_line_length": 25.59113311767578, "blob_id": "2b51d82a4fc33429e7f536204c85dd949f1ab712", "content_id": "364e1ff54ebc0df8b11bbba8e1d8793cc0acfa2d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 5398, "license_type": "permissive", "max_line_length": 76, "num_lines": 203, "path": "/bids-validator/src/schema/context.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import {\n Context,\n ContextDataset,\n ContextDatasetSubjects,\n ContextSubject,\n ContextAssociations,\n ContextNiftiHeader,\n} from '../types/context.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { BIDSEntities, readEntities } from './entities.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { parseTSV } from '../files/tsv.ts'\nimport { loadHeader } from '../files/nifti.ts'\nimport { buildAssociations } from './associations.ts'\nimport { ValidatorOptions } from '../setup/options.ts'\nimport { logger } from '../utils/logger.ts'\n\nexport class BIDSContextDataset implements ContextDataset {\n dataset_description: Record<string, unknown>\n options?: ValidatorOptions\n files: any[]\n tree: object\n ignored: any[]\n modalities: any[]\n subjects: ContextDatasetSubjects[]\n\n constructor(options?: ValidatorOptions, description = {}) {\n this.dataset_description = description\n this.files = []\n this.tree = {}\n this.ignored = []\n this.modalities = []\n this.subjects = [] as ContextDatasetSubjects[]\n if (options) {\n this.options = options\n }\n if (\n !this.dataset_description.DatasetType &&\n this.dataset_description.GeneratedBy\n ) {\n this.dataset_description.DatasetType = 'derivative'\n } else if (!this.dataset_description.DatasetType) {\n this.dataset_description.DatasetType = 'raw'\n }\n }\n}\n\nconst defaultDsContext = new BIDSContextDataset()\n\nexport class BIDSContext implements Context {\n // Internal representation of the file tree\n fileTree: FileTree\n filenameRules: string[]\n issues: DatasetIssues\n file: BIDSFile\n suffix: string\n extension: string\n entities: Record<string, string>\n dataset: ContextDataset\n subject: ContextSubject\n datatype: string\n modality: 
string\n sidecar: object\n columns: Record<string, string[]>\n associations: ContextAssociations\n nifti_header?: ContextNiftiHeader\n\n constructor(\n fileTree: FileTree,\n file: BIDSFile,\n issues: DatasetIssues,\n dsContext?: BIDSContextDataset,\n ) {\n this.fileTree = fileTree\n this.filenameRules = []\n this.issues = issues\n this.file = file\n const bidsEntities = readEntities(file.name)\n this.suffix = bidsEntities.suffix\n this.extension = bidsEntities.extension\n this.entities = bidsEntities.entities\n this.dataset = dsContext ? dsContext : defaultDsContext\n this.subject = {} as ContextSubject\n this.datatype = ''\n this.modality = ''\n this.sidecar = {}\n this.columns = {}\n this.associations = {} as ContextAssociations\n }\n\n get json(): Promise<Record<string, any>> {\n return this.file\n .text()\n .then((text) => JSON.parse(text))\n .catch((error) => {})\n }\n get path(): string {\n return this.file.path\n }\n\n /**\n * Implementation specific absolute path for the dataset root\n *\n * In the browser, this is always at the root\n */\n get datasetPath(): string {\n return this.fileTree.path\n }\n\n /**\n * Crawls fileTree from root to current context file, loading any valid\n * json sidecars found.\n */\n async loadSidecar(fileTree?: FileTree) {\n if (!fileTree) {\n fileTree = this.fileTree\n }\n const validSidecars = fileTree.files.filter((file) => {\n const { suffix, extension, entities } = readEntities(file.name)\n return (\n extension === '.json' &&\n suffix === this.suffix &&\n Object.keys(entities).every((entity) => {\n return (\n entity in this.entities &&\n entities[entity] === this.entities[entity]\n )\n })\n )\n })\n\n if (validSidecars.length > 1) {\n const exactMatch = validSidecars.find(\n (sidecar) =>\n sidecar.path == this.file.path.replace(this.extension, '.json'),\n )\n if (exactMatch) {\n validSidecars.splice(1)\n validSidecars[0] = exactMatch\n } else {\n logger.warning(\n `Multiple sidecar files detected for '${this.file.path}'`,\n )\n }\n }\n\n if (validSidecars.length === 1) {\n const json = await validSidecars[0]\n .text()\n .then((text) => JSON.parse(text))\n .catch((error) => {})\n this.sidecar = { ...this.sidecar, ...json }\n }\n const nextDir = fileTree.directories.find((directory) => {\n return this.file.path.startsWith(directory.path)\n })\n if (nextDir) {\n await this.loadSidecar(nextDir)\n }\n }\n\n async loadNiftiHeader(): Promise<void> {\n if (\n this.extension.startsWith('.nii') &&\n this.dataset.options &&\n !this.dataset.options.ignoreNiftiHeaders\n ) {\n this.nifti_header = await loadHeader(this.file)\n }\n }\n\n async loadColumns(): Promise<void> {\n if (this.extension !== '.tsv') {\n return\n }\n this.columns = await this.file\n .text()\n .then((text) => parseTSV(text))\n .catch((error) => {\n logger.warning(\n `tsv file could not be opened by loadColumns '${this.file.path}'`,\n )\n logger.debug(error)\n return {}\n })\n return\n }\n\n async loadAssociations(): Promise<void> {\n this.associations = await buildAssociations(this.fileTree, this)\n return\n }\n\n async asyncLoads() {\n await Promise.allSettled([\n this.loadSidecar(),\n this.loadColumns(),\n this.loadAssociations(),\n ])\n this.loadNiftiHeader()\n }\n}\n" }, { "alpha_fraction": 0.6041162014007568, "alphanum_fraction": 0.630145251750946, "avg_line_length": 32.040000915527344, "blob_id": "17b03fea38791b1acb50e9b570e6709bf16875c3", "content_id": "e64cf9395405d2109e8ea46bafacddfa95fe4ab6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"TypeScript", "length_bytes": 1652, "license_type": "permissive", "max_line_length": 92, "num_lines": 50, "path": "/bids-validator/src/tests/local/empty_files.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// Deno runtime tests for tests/data/empty_files\nimport { assert, assertEquals, assertObjectMatch } from '../../deps/asserts.ts'\nimport { validatePath, formatAssertIssue } from './common.ts'\n\nconst PATH = 'tests/data/empty_files'\n\n/**\n * Contains stripped down CTF format dataset: Both, BadChannels and\n * bad.segments files can be empty and still valid. Everything else must\n * not be empty.\n */\nDeno.test('empty_files dataset', async (t) => {\n const { tree, result } = await validatePath(t, PATH)\n\n await t.step('correctly ignores .bidsignore files', () => {\n assert(\n result.issues.get('NOT_INCLUDED') === undefined,\n formatAssertIssue(\n 'NOT_INCLUDED should not be present',\n result.issues.get('NOT_INCLUDED'),\n ),\n )\n })\n\n // *.meg4 and BadChannels files are empty. But only *.meg4 is an issue\n await t.step(\n 'EMPTY_FILES error is thrown for only sub-0001_task-AEF_run-01_meg.meg4',\n () => {\n const issue = result.issues.get('EMPTY_FILE')\n assert(issue, 'EMPTY_FILES was not thrown as expected')\n assertObjectMatch(issue, {\n key: 'EMPTY_FILE',\n severity: 'error',\n })\n assert(\n issue.files.get(\n '/sub-0001/meg/sub-0001_task-AEF_run-01_meg.ds/sub-0001_task-AEF_run-01_meg.meg4',\n ),\n 'sub-0001_task-AEF_run-01_meg.meg4 is empty but not present in EMPTY_FILE issue',\n )\n assertEquals(\n issue.files.get(\n 'tests/data/empty_files/sub-0001/meg/sub-0001_task-AEF_run-01_meg.ds/BadChannels',\n ),\n undefined,\n 'BadChannels should not be included in EMPTY_FILES error',\n )\n },\n )\n})\n" }, { "alpha_fraction": 0.6619718074798584, "alphanum_fraction": 0.668008029460907, "avg_line_length": 30.0625, "blob_id": "db4b73b5debb3ba4b7117d8694e39f28ed2d32e9", "content_id": "9522097e39f49dd5c9b4fafc01a2b326f31e1fde", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 497, "license_type": "permissive", "max_line_length": 74, "num_lines": 16, "path": "/bids-validator/src/schema/modalities.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { Schema } from '../types/schema.ts'\n\nexport function lookupModality(schema: Schema, datatype: string): string {\n const modalities = schema.rules.modalities as Record<string, any>\n const datatypes = Object.keys(modalities).filter((key: string) => {\n modalities[key].datatypes.includes(datatype)\n })\n if (datatypes.length === 1) {\n return datatypes[0]\n } else if (datatypes.length === 0) {\n return ''\n } else {\n // what if multiple modalites are found?\n return ''\n }\n}\n" }, { "alpha_fraction": 0.6037112474441528, "alphanum_fraction": 0.6071709394454956, "avg_line_length": 30.48019790649414, "blob_id": "927443b825f714eae3750cdfe57d3b1cf02a60f3", "content_id": "bf0c8aa654b69506d90eb1b7944da5356d12a569", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6359, "license_type": "permissive", "max_line_length": 86, "num_lines": 202, "path": "/bids-validator/validators/json/json.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nimport Ajv from 'ajv'\nconst ajv = new Ajv({ allErrors: true 
})\najv.addMetaSchema(require('ajv/lib/refs/json-schema-draft-06.json'))\najv.addSchema(require('./schemas/common_definitions.json'))\nconst Issue = utils.issues.Issue\n\n/**\n * JSON\n *\n * Takes a JSON file as a string and a callback\n * as arguments. And callsback with any errors\n * it finds while validating against the BIDS\n * specification.\n */\nexport default function (file, jsonContentsDict, callback) {\n // primary flow --------------------------------------------------------------------\n let issues = []\n const potentialSidecars = utils.files.potentialLocations(file.relativePath)\n const mergedDictionary = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n if (mergedDictionary) {\n issues = issues.concat(checkUnits(file, mergedDictionary))\n issues = issues.concat(compareSidecarProperties(file, mergedDictionary))\n }\n callback(issues, mergedDictionary)\n}\n\n// individual checks ---------------------------------------------------------------\n\nfunction checkUnits(file, sidecar) {\n let issues = []\n const schema = selectSchema(file)\n issues = issues.concat(validateSchema(file, sidecar, schema))\n\n issues = issues.concat(\n checkSidecarUnits(file, sidecar, { field: 'RepetitionTime', min: 100 }, 2),\n )\n\n issues = issues.concat(\n checkSidecarUnits(file, sidecar, { field: 'EchoTime', min: 1 }, 3),\n )\n issues = issues.concat(\n checkSidecarUnits(file, sidecar, { field: 'EchoTime1', min: 1 }, 4),\n )\n issues = issues.concat(\n checkSidecarUnits(file, sidecar, { field: 'EchoTime2', min: 1 }, 4),\n )\n issues = issues.concat(\n checkSidecarUnits(file, sidecar, { field: 'TotalReadoutTime', min: 10 }, 5),\n )\n\n return issues\n}\n\nconst compareSidecarProperties = (file, sidecar) => {\n const issues = []\n\n // check that EffectiveEchoSpacing < TotalReadoutTime\n if (\n sidecar.hasOwnProperty('TotalReadoutTime') &&\n sidecar.hasOwnProperty('EffectiveEchoSpacing') &&\n sidecar['TotalReadoutTime'] < sidecar['EffectiveEchoSpacing']\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 93,\n }),\n )\n }\n return issues\n}\n\nconst selectSchema = (file) => {\n let schema = null\n if (file.name) {\n if (file.name.endsWith('participants.json')) {\n schema = require('./schemas/data_dictionary.json')\n } else if (\n file.name.endsWith('bold.json') ||\n file.name.endsWith('sbref.json')\n ) {\n schema = require('./schemas/bold.json')\n } else if (file.name.endsWith('asl.json')) {\n schema = require('./schemas/asl.json')\n } else if (file.name.endsWith('pet.json')) {\n schema = require('./schemas/pet.json')\n } else if (file.name.endsWith('nirs.json')) {\n schema = require('./schemas/nirs.json')\n } else if (file.relativePath === '/dataset_description.json') {\n schema = require('./schemas/dataset_description.json')\n } else if (file.name.endsWith('meg.json')) {\n schema = require('./schemas/meg.json')\n } else if (file.name.endsWith('ieeg.json')) {\n schema = require('./schemas/ieeg.json')\n } else if (file.name.endsWith('eeg.json')) {\n schema = require('./schemas/eeg.json')\n } else if (\n file.name.endsWith('TEM.json') ||\n file.name.endsWith('SEM.json') ||\n file.name.endsWith('uCT.json') ||\n file.name.endsWith('BF.json') ||\n file.name.endsWith('DF.json') ||\n file.name.endsWith('PC.json') ||\n file.name.endsWith('DIC.json') ||\n file.name.endsWith('FLUO.json') ||\n file.name.endsWith('CONF.json') ||\n file.name.endsWith('PLI.json') ||\n file.name.endsWith('CARS.json') ||\n file.name.endsWith('2PE.json') ||\n file.name.endsWith('MPE.json') 
||\n file.name.endsWith('SR.json') ||\n file.name.endsWith('NLO.json') ||\n file.name.endsWith('OCT.json') ||\n file.name.endsWith('SPIM.json')\n ) {\n schema = require('./schemas/microscopy.json')\n } else if (\n file.relativePath.includes('/micr') &&\n file.name.endsWith('photo.json')\n ) {\n schema = require('./schemas/microscopy_photo.json')\n } else if (\n file.relativePath.includes('/meg/') &&\n file.name.endsWith('coordsystem.json')\n ) {\n schema = require('./schemas/coordsystem_meg.json')\n } else if (\n file.relativePath.includes('/ieeg/') &&\n file.name.endsWith('coordsystem.json')\n ) {\n schema = require('./schemas/coordsystem_ieeg.json')\n } else if (\n file.relativePath.includes('/eeg/') &&\n file.name.endsWith('coordsystem.json')\n ) {\n schema = require('./schemas/coordsystem_eeg.json')\n } else if (\n file.relativePath.includes('/nirs/') &&\n file.name.endsWith('coordsystem.json')\n ) {\n schema = require('./schemas/coordsystem_nirs.json')\n } else if (file.name.endsWith('genetic_info.json')) {\n schema = require('./schemas/genetic_info.json')\n } else if (\n file.relativePath.includes('/pet/') &&\n file.name.endsWith('blood.json')\n ) {\n schema = require('./schemas/pet_blood.json')\n } else if (\n file.name.endsWith('physio.json') ||\n file.name.endsWith('stim.json')\n ) {\n schema = require('./schemas/physio.json')\n } else if (file.name.endsWith('events.json')) {\n schema = require('./schemas/events.json')\n } else if (file.name.endsWith('beh.json')) {\n schema = require('./schemas/beh.json')\n } else if (file.name.endsWith('_motion.json')) {\n schema = require('./schemas/motion.json')\n }\n }\n return schema\n}\n\nconst validateSchema = (file, sidecar, schema) => {\n const issues = []\n if (schema) {\n const validate = ajv.compile(schema)\n const valid = validate(sidecar)\n if (!valid) {\n validate.errors.map((error) =>\n issues.push(\n new Issue({\n file: file,\n code: 55,\n evidence: error.dataPath + ' ' + error.message,\n }),\n ),\n )\n }\n }\n return issues\n}\n\nconst checkSidecarUnits = (file, sidecar, fieldObj, errCode) => {\n const issues = []\n const field = fieldObj.field\n const min = fieldObj.min\n if (sidecar.hasOwnProperty(field) && sidecar[field] > min) {\n issues.push(\n new Issue({\n code: errCode,\n file: file,\n }),\n )\n }\n return issues\n}\n" }, { "alpha_fraction": 0.6541725397109985, "alphanum_fraction": 0.6888260245323181, "avg_line_length": 43.1875, "blob_id": "2b0a604d843be35d4de750ac6213ed4a6a7a0061", "content_id": "c8981269bdf84e2080d19fa8a88d33cdd289583b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1414, "license_type": "permissive", "max_line_length": 109, "num_lines": 32, "path": "/bids-validator/src/utils/logger.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals } from '../deps/asserts.ts'\nimport { parseStack } from './logger.ts'\n\nDeno.test('logger', async (t) => {\n await t.step('test stack trace behavior for regular invocation', () => {\n const stack = `Error\n at Object.get (file:///bids-validator/src/utils/logger.ts:39:19)\n at file:///bids-validator/src/schema/context.ts:170:16\n at async BIDSContext.loadColumns (file:///bids-validator/src/schema/context.ts:163:20)\n at async Function.allSettled (<anonymous>)\n at async BIDSContext.asyncLoads (file:///bids-validator/src/schema/context.ts:182:5)\n at async validate (file:///bids-validator/src/validators/bids.ts:78:5)\n at async main 
(file:///bids-validator/src/main.ts:26:24)\n at async file:///bids-validator/bids-validator-deno:4:1\n`\n assertEquals(\n parseStack(stack),\n `file:///bids-validator/src/schema/context.ts:170:16`,\n )\n })\n await t.step('test stack trace behavior for catch invocation', () => {\n const stack = `Error\n at Object.get (file:///bids-validator/bids-validator/src/utils/logger.ts:31:19)\n at loadHeader (file:///bids-validator/bids-validator/src/files/nifti.ts:18:12)\n at async BIDSContext.loadNiftiHeader (file:///bids-validator/bids-validator/src/schema/context.ts:155:27)\n`\n assertEquals(\n parseStack(stack),\n 'loadHeader (file:///bids-validator/bids-validator/src/files/nifti.ts:18:12)',\n )\n })\n})\n" }, { "alpha_fraction": 0.5458290576934814, "alphanum_fraction": 0.546343982219696, "avg_line_length": 23.897436141967773, "blob_id": "dd344cb2d72d747d256f25c37ff50c42e9a886d8", "content_id": "e6036a3f8aeecb10ec6d9991bbba00aab56abcd9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1942, "license_type": "permissive", "max_line_length": 67, "num_lines": 78, "path": "/bids-validator/src/schema/expressionLanguage.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "function exists(list: string[], rule: string = 'dataset'): number {\n const prefix: string[] = []\n\n // Stimuli and subject-relative paths get prefixes\n if (rule == 'stimuli') {\n prefix.push('stimuli')\n } else if (rule == 'subject') {\n // @ts-expect-error\n prefix.push('sub-' + this.entities.subject)\n }\n\n if (!Array.isArray(list)) {\n list = [list]\n }\n if (rule == 'bids-uri') {\n // XXX To implement\n return list.length\n } else {\n // dataset, subject and stimuli\n return list.filter((x) => {\n const parts = prefix.concat(x.split('/'))\n // @ts-expect-error\n return this.fileTree.contains(parts)\n }).length\n }\n}\n\nexport const expressionFunctions = {\n index: <T>(list: T[], item: T): number | null => {\n const index = list.indexOf(item)\n return index != -1 ? 
index : null\n },\n intersects: <T>(a: T[], b: T[]): boolean => {\n if (!Array.isArray(a)) {\n a = [a]\n }\n if (!Array.isArray(b)) {\n b = [b]\n }\n return a.some((x) => b.includes(x))\n },\n match: (target: string, regex: string): boolean => {\n let re = RegExp(regex)\n return target.match(re) !== null\n },\n type: <T>(operand: T): string => {\n if (Array.isArray(operand)) {\n return 'array'\n }\n if (typeof operand === 'undefined') {\n return 'null'\n }\n return typeof operand\n },\n min: (list: number[]): number => {\n return Math.min(...list)\n },\n max: (list: number[]): number => {\n return Math.max(...list)\n },\n length: <T>(list: T[]): number | null => {\n if (Array.isArray(list) || typeof list == 'string') {\n return list.length\n }\n return null\n },\n count: <T>(list: T[], val: T): number => {\n return list.filter((x) => x === val).length\n },\n exists: exists,\n substr: (arg: string, start: number, end: number): string => {\n return arg.substr(start, end - start)\n },\n sorted: <T>(list: T[]): T[] => {\n list.sort()\n return list\n },\n}\n" }, { "alpha_fraction": 0.5822519063949585, "alphanum_fraction": 0.7190796136856079, "avg_line_length": 58.99815368652344, "blob_id": "64aba54a6554f289d19ebfa66282bb0cdb222b30", "content_id": "936ab53660a7cf6137a04ba916d4fcdfc3a5d22e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 130076, "license_type": "permissive", "max_line_length": 109, "num_lines": 2168, "path": "/bids-validator/tests/data/collectModalities-data.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// MEG\nexport const ds000247 = [\n '/participants.json',\n '/participants.tsv',\n '/sub-0007/ses-01/anat/sub-0007_ses-01_T1w.nii.gz',\n '/sub-0007/ses-01/sub-0007_ses-01_scans.tsv',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.json',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_headshape.pos',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_channels.tsv',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/processing.cfg',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/params.dsc',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.infods',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/ClassFile.cls.bak',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.hc',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.newds',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/bad.segments',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/BadChannels',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.hist',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.acq',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.meg4',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/ClassFile.cls',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/default.de',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_task-rest_run-01_meg.ds/sub-0007_ses-01_task-rest_run-01_meg.res4',\n '/sub-0007/ses-01/meg/sub-0007_ses-01_coordsystem.json',\n '/sub-0002/ses-01/anat/sub-0002_ses-01_T1w.nii.gz',\n '/sub-0002/ses-01/sub-0002_ses-01_scans.tsv',\n 
'/sub-0002/ses-01/meg/sub-0002_ses-01_coordsystem.json',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.res4',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.infods',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/processing.cfg',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/params.dsc',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.hc',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/ClassFile.cls.bak',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.hist',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.meg4',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.acq',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/sub-0002_ses-01_task-rest_run-01_meg.newds',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/ClassFile.cls',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.ds/default.de',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_headshape.pos',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_meg.json',\n '/sub-0002/ses-01/meg/sub-0002_ses-01_task-rest_run-01_channels.tsv',\n '/sub-0004/ses-01/anat/sub-0004_ses-01_T1w.nii.gz',\n '/sub-0004/ses-01/sub-0004_ses-01_scans.tsv',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_coordsystem.json',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.hist',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.infods',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/processing.cfg',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/params.dsc',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.meg4',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/ClassFile.cls.bak',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.hc',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/bad.segments',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/BadChannels',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/ClassFile.cls',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.res4',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.acq',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.ds/sub-0004_ses-01_task-rest_run-01_meg.newds',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_headshape.pos',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_channels.tsv',\n '/sub-0004/ses-01/meg/sub-0004_ses-01_task-rest_run-01_meg.json',\n '/.datalad/.gitattributes',\n '/.datalad/config',\n '/dataset_description.json',\n '/README',\n '/sub-0003/ses-01/sub-0003_ses-01_scans.tsv',\n '/sub-0003/ses-01/anat/sub-0003_ses-01_T1w.nii.gz',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_headshape.pos',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_channels.tsv',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_coordsystem.json',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.json',\n 
'/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.newds',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.infods',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.acq',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/processing.cfg',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/params.dsc',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.res4',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/ClassFile.cls.bak',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.meg4',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/bad.segments',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.hist',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/sub-0003_ses-01_task-rest_run-01_meg.hc',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/BadChannels',\n '/sub-0003/ses-01/meg/sub-0003_ses-01_task-rest_run-01_meg.ds/ClassFile.cls',\n]\n\n// PET\nexport const ds001421 = [\n '/sub-01/ses-02/pet/sub-01_ses-02_pet.json',\n '/sub-01/ses-02/pet/sub-01_ses-02_pet.nii.gz',\n '/sub-01/ses-02/anat/sub-01_ses-02_T1w.json',\n '/sub-01/ses-02/anat/sub-01_ses-02_T1w.nii',\n '/sub-01/ses-01/pet/sub-01_ses-01_pet.nii.gz',\n '/sub-01/ses-01/pet/sub-01_ses-01_pet.json',\n '/sub-01/ses-01/anat/sub-01_ses-01_T1w.nii',\n '/sub-01/ses-01/anat/sub-01_ses-01_T1w.json',\n]\n\n// MRI and many files example\nexport const ds001734 = [\n '/sub-116/fmap/sub-116_magnitude1.nii.gz',\n '/sub-116/fmap/sub-116_magnitude2.nii.gz',\n '/sub-116/fmap/sub-116_phasediff.nii.gz',\n '/sub-116/fmap/sub-116_phasediff.json',\n '/sub-116/anat/sub-116_T1w.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-02_sbref.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-01_sbref.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-03_bold.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-04_bold.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-04_events.tsv',\n '/sub-116/func/sub-116_task-MGT_run-02_events.tsv',\n '/sub-116/func/sub-116_task-MGT_run-03_events.tsv',\n '/sub-116/func/sub-116_task-MGT_run-01_events.tsv',\n '/sub-116/func/sub-116_task-MGT_run-04_sbref.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-03_sbref.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-01_bold.nii.gz',\n '/sub-116/func/sub-116_task-MGT_run-02_bold.nii.gz',\n '/sub-036/fmap/sub-036_magnitude2.nii.gz',\n '/sub-036/fmap/sub-036_magnitude1.nii.gz',\n '/sub-036/fmap/sub-036_phasediff.json',\n '/sub-036/fmap/sub-036_phasediff.nii.gz',\n '/sub-036/anat/sub-036_T1w.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-04_bold.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-01_events.tsv',\n '/sub-036/func/sub-036_task-MGT_run-02_sbref.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-01_sbref.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-02_events.tsv',\n '/sub-036/func/sub-036_task-MGT_run-01_bold.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-02_bold.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-03_bold.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-03_sbref.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-04_sbref.nii.gz',\n '/sub-036/func/sub-036_task-MGT_run-04_events.tsv',\n '/sub-036/func/sub-036_task-MGT_run-03_events.tsv',\n '/sub-027/fmap/sub-027_phasediff.json',\n '/sub-027/fmap/sub-027_magnitude2.nii.gz',\n 
'/sub-027/fmap/sub-027_magnitude1.nii.gz',\n '/sub-027/fmap/sub-027_phasediff.nii.gz',\n '/sub-027/anat/sub-027_T1w.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-01_sbref.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-02_events.tsv',\n '/sub-027/func/sub-027_task-MGT_run-02_bold.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-03_bold.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-04_sbref.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-02_sbref.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-04_events.tsv',\n '/sub-027/func/sub-027_task-MGT_run-01_bold.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-03_events.tsv',\n '/sub-027/func/sub-027_task-MGT_run-03_sbref.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-04_bold.nii.gz',\n '/sub-027/func/sub-027_task-MGT_run-01_events.tsv',\n '/sub-030/fmap/sub-030_magnitude1.nii.gz',\n '/sub-030/fmap/sub-030_phasediff.nii.gz',\n '/sub-030/fmap/sub-030_magnitude2.nii.gz',\n '/sub-030/fmap/sub-030_phasediff.json',\n '/sub-030/anat/sub-030_T1w.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-04_events.tsv',\n '/sub-030/func/sub-030_task-MGT_run-01_bold.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-02_events.tsv',\n '/sub-030/func/sub-030_task-MGT_run-02_sbref.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-03_events.tsv',\n '/sub-030/func/sub-030_task-MGT_run-01_events.tsv',\n '/sub-030/func/sub-030_task-MGT_run-02_bold.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-01_sbref.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-04_sbref.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-04_bold.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-03_sbref.nii.gz',\n '/sub-030/func/sub-030_task-MGT_run-03_bold.nii.gz',\n '/sub-061/fmap/sub-061_phasediff.json',\n '/sub-061/fmap/sub-061_magnitude1.nii.gz',\n '/sub-061/fmap/sub-061_magnitude2.nii.gz',\n '/sub-061/fmap/sub-061_phasediff.nii.gz',\n '/sub-061/anat/sub-061_T1w.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-02_events.tsv',\n '/sub-061/func/sub-061_task-MGT_run-01_sbref.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-04_bold.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-02_sbref.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-03_sbref.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-02_bold.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-03_bold.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-03_events.tsv',\n '/sub-061/func/sub-061_task-MGT_run-01_bold.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-01_events.tsv',\n '/sub-061/func/sub-061_task-MGT_run-04_sbref.nii.gz',\n '/sub-061/func/sub-061_task-MGT_run-04_events.tsv',\n '/sub-058/fmap/sub-058_phasediff.nii.gz',\n '/sub-058/fmap/sub-058_phasediff.json',\n '/sub-058/fmap/sub-058_magnitude2.nii.gz',\n '/sub-058/fmap/sub-058_magnitude1.nii.gz',\n '/sub-058/anat/sub-058_T1w.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-02_bold.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-04_sbref.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-02_sbref.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-01_bold.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-03_bold.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-01_sbref.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-03_events.tsv',\n '/sub-058/func/sub-058_task-MGT_run-01_events.tsv',\n '/sub-058/func/sub-058_task-MGT_run-04_bold.nii.gz',\n '/sub-058/func/sub-058_task-MGT_run-04_events.tsv',\n '/sub-058/func/sub-058_task-MGT_run-02_events.tsv',\n '/sub-058/func/sub-058_task-MGT_run-03_sbref.nii.gz',\n '/sub-008/fmap/sub-008_phasediff.nii.gz',\n '/sub-008/fmap/sub-008_magnitude1.nii.gz',\n '/sub-008/fmap/sub-008_magnitude2.nii.gz',\n 
'/sub-008/fmap/sub-008_phasediff.json',\n '/sub-008/anat/sub-008_T1w.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-03_sbref.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-04_bold.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-04_events.tsv',\n '/sub-008/func/sub-008_task-MGT_run-03_bold.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-02_bold.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-01_sbref.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-01_bold.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-01_events.tsv',\n '/sub-008/func/sub-008_task-MGT_run-03_events.tsv',\n '/sub-008/func/sub-008_task-MGT_run-02_sbref.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-04_sbref.nii.gz',\n '/sub-008/func/sub-008_task-MGT_run-02_events.tsv',\n '/sub-021/fmap/sub-021_phasediff.json',\n '/sub-021/fmap/sub-021_magnitude1.nii.gz',\n '/sub-021/fmap/sub-021_magnitude2.nii.gz',\n '/sub-021/fmap/sub-021_phasediff.nii.gz',\n '/sub-021/anat/sub-021_T1w.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-04_bold.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-03_events.tsv',\n '/sub-021/func/sub-021_task-MGT_run-02_sbref.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-03_bold.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-02_events.tsv',\n '/sub-021/func/sub-021_task-MGT_run-01_bold.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-01_sbref.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-04_events.tsv',\n '/sub-021/func/sub-021_task-MGT_run-01_events.tsv',\n '/sub-021/func/sub-021_task-MGT_run-02_bold.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-03_sbref.nii.gz',\n '/sub-021/func/sub-021_task-MGT_run-04_sbref.nii.gz',\n '/sub-102/fmap/sub-102_magnitude2.nii.gz',\n '/sub-102/fmap/sub-102_phasediff.nii.gz',\n '/sub-102/fmap/sub-102_magnitude1.nii.gz',\n '/sub-102/fmap/sub-102_phasediff.json',\n '/sub-102/anat/sub-102_T1w.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-02_bold.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-01_sbref.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-04_events.tsv',\n '/sub-102/func/sub-102_task-MGT_run-01_bold.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-03_sbref.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-03_events.tsv',\n '/sub-102/func/sub-102_task-MGT_run-01_events.tsv',\n '/sub-102/func/sub-102_task-MGT_run-04_bold.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-02_events.tsv',\n '/sub-102/func/sub-102_task-MGT_run-04_sbref.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-03_bold.nii.gz',\n '/sub-102/func/sub-102_task-MGT_run-02_sbref.nii.gz',\n '/sub-090/fmap/sub-090_phasediff.nii.gz',\n '/sub-090/fmap/sub-090_magnitude1.nii.gz',\n '/sub-090/fmap/sub-090_magnitude2.nii.gz',\n '/sub-090/fmap/sub-090_phasediff.json',\n '/sub-090/anat/sub-090_T1w.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-04_sbref.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-01_bold.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-03_sbref.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-04_events.tsv',\n '/sub-090/func/sub-090_task-MGT_run-02_bold.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-01_events.tsv',\n '/sub-090/func/sub-090_task-MGT_run-02_sbref.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-02_events.tsv',\n '/sub-090/func/sub-090_task-MGT_run-04_bold.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-03_events.tsv',\n '/sub-090/func/sub-090_task-MGT_run-01_sbref.nii.gz',\n '/sub-090/func/sub-090_task-MGT_run-03_bold.nii.gz',\n '/sub-001/fmap/sub-001_magnitude2.nii.gz',\n '/sub-001/fmap/sub-001_phasediff.nii.gz',\n '/sub-001/fmap/sub-001_phasediff.json',\n '/sub-001/fmap/sub-001_magnitude1.nii.gz',\n 
'/sub-001/anat/sub-001_T1w.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-04_events.tsv',\n '/sub-001/func/sub-001_task-MGT_run-03_sbref.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-01_bold.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-02_sbref.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-02_events.tsv',\n '/sub-001/func/sub-001_task-MGT_run-04_sbref.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-01_events.tsv',\n '/sub-001/func/sub-001_task-MGT_run-03_events.tsv',\n '/sub-001/func/sub-001_task-MGT_run-01_sbref.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-03_bold.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-04_bold.nii.gz',\n '/sub-001/func/sub-001_task-MGT_run-02_bold.nii.gz',\n '/participants.tsv',\n '/sub-015/fmap/sub-015_phasediff.json',\n '/sub-015/fmap/sub-015_magnitude2.nii.gz',\n '/sub-015/fmap/sub-015_phasediff.nii.gz',\n '/sub-015/fmap/sub-015_magnitude1.nii.gz',\n '/sub-015/anat/sub-015_T1w.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-01_sbref.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-03_sbref.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-01_bold.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-02_sbref.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-04_events.tsv',\n '/sub-015/func/sub-015_task-MGT_run-03_bold.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-04_sbref.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-01_events.tsv',\n '/sub-015/func/sub-015_task-MGT_run-02_bold.nii.gz',\n '/sub-015/func/sub-015_task-MGT_run-03_events.tsv',\n '/sub-015/func/sub-015_task-MGT_run-02_events.tsv',\n '/sub-015/func/sub-015_task-MGT_run-04_bold.nii.gz',\n '/sub-077/fmap/sub-077_phasediff.nii.gz',\n '/sub-077/fmap/sub-077_magnitude1.nii.gz',\n '/sub-077/fmap/sub-077_magnitude2.nii.gz',\n '/sub-077/fmap/sub-077_phasediff.json',\n '/sub-077/anat/sub-077_T1w.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-02_bold.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-02_events.tsv',\n '/sub-077/func/sub-077_task-MGT_run-04_events.tsv',\n '/sub-077/func/sub-077_task-MGT_run-01_bold.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-01_sbref.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-03_events.tsv',\n '/sub-077/func/sub-077_task-MGT_run-01_events.tsv',\n '/sub-077/func/sub-077_task-MGT_run-04_sbref.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-03_sbref.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-04_bold.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-03_bold.nii.gz',\n '/sub-077/func/sub-077_task-MGT_run-02_sbref.nii.gz',\n '/sub-094/fmap/sub-094_magnitude2.nii.gz',\n '/sub-094/fmap/sub-094_magnitude1.nii.gz',\n '/sub-094/fmap/sub-094_phasediff.nii.gz',\n '/sub-094/fmap/sub-094_phasediff.json',\n '/sub-094/anat/sub-094_T1w.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-04_sbref.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-04_bold.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-04_events.tsv',\n '/sub-094/func/sub-094_task-MGT_run-01_events.tsv',\n '/sub-094/func/sub-094_task-MGT_run-02_bold.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-02_events.tsv',\n '/sub-094/func/sub-094_task-MGT_run-03_events.tsv',\n '/sub-094/func/sub-094_task-MGT_run-01_sbref.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-03_sbref.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-02_sbref.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-03_bold.nii.gz',\n '/sub-094/func/sub-094_task-MGT_run-01_bold.nii.gz',\n '/sub-080/fmap/sub-080_phasediff.nii.gz',\n '/sub-080/fmap/sub-080_magnitude1.nii.gz',\n '/sub-080/fmap/sub-080_phasediff.json',\n '/sub-080/fmap/sub-080_magnitude2.nii.gz',\n 
'/sub-080/anat/sub-080_T1w.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-03_events.tsv',\n '/sub-080/func/sub-080_task-MGT_run-04_sbref.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-04_bold.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-03_bold.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-02_bold.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-03_sbref.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-02_sbref.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-02_events.tsv',\n '/sub-080/func/sub-080_task-MGT_run-01_events.tsv',\n '/sub-080/func/sub-080_task-MGT_run-04_events.tsv',\n '/sub-080/func/sub-080_task-MGT_run-01_sbref.nii.gz',\n '/sub-080/func/sub-080_task-MGT_run-01_bold.nii.gz',\n '/sub-069/fmap/sub-069_phasediff.nii.gz',\n '/sub-069/fmap/sub-069_phasediff.json',\n '/sub-069/fmap/sub-069_magnitude1.nii.gz',\n '/sub-069/fmap/sub-069_magnitude2.nii.gz',\n '/sub-069/anat/sub-069_T1w.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-04_sbref.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-03_sbref.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-02_events.tsv',\n '/sub-069/func/sub-069_task-MGT_run-03_events.tsv',\n '/sub-069/func/sub-069_task-MGT_run-01_events.tsv',\n '/sub-069/func/sub-069_task-MGT_run-02_sbref.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-04_bold.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-01_bold.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-02_bold.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-04_events.tsv',\n '/sub-069/func/sub-069_task-MGT_run-01_sbref.nii.gz',\n '/sub-069/func/sub-069_task-MGT_run-03_bold.nii.gz',\n '/task-MGT_bold.json',\n '/sub-016/fmap/sub-016_magnitude1.nii.gz',\n '/sub-016/fmap/sub-016_phasediff.json',\n '/sub-016/fmap/sub-016_magnitude2.nii.gz',\n '/sub-016/fmap/sub-016_phasediff.nii.gz',\n '/sub-016/anat/sub-016_T1w.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-02_sbref.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-04_bold.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-02_bold.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-03_sbref.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-01_bold.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-04_sbref.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-01_sbref.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-04_events.tsv',\n '/sub-016/func/sub-016_task-MGT_run-03_bold.nii.gz',\n '/sub-016/func/sub-016_task-MGT_run-01_events.tsv',\n '/sub-016/func/sub-016_task-MGT_run-02_events.tsv',\n '/sub-016/func/sub-016_task-MGT_run-03_events.tsv',\n '/sub-114/fmap/sub-114_magnitude2.nii.gz',\n '/sub-114/fmap/sub-114_magnitude1.nii.gz',\n '/sub-114/fmap/sub-114_phasediff.json',\n '/sub-114/fmap/sub-114_phasediff.nii.gz',\n '/sub-114/anat/sub-114_T1w.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-04_bold.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-04_events.tsv',\n '/sub-114/func/sub-114_task-MGT_run-01_sbref.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-02_sbref.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-03_bold.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-01_bold.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-01_events.tsv',\n '/sub-114/func/sub-114_task-MGT_run-02_events.tsv',\n '/sub-114/func/sub-114_task-MGT_run-03_events.tsv',\n '/sub-114/func/sub-114_task-MGT_run-04_sbref.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-02_bold.nii.gz',\n '/sub-114/func/sub-114_task-MGT_run-03_sbref.nii.gz',\n '/sub-115/fmap/sub-115_magnitude1.nii.gz',\n '/sub-115/fmap/sub-115_phasediff.nii.gz',\n '/sub-115/fmap/sub-115_magnitude2.nii.gz',\n '/sub-115/fmap/sub-115_phasediff.json',\n 
'/sub-115/anat/sub-115_T1w.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-02_bold.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-03_events.tsv',\n '/sub-115/func/sub-115_task-MGT_run-01_bold.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-02_events.tsv',\n '/sub-115/func/sub-115_task-MGT_run-04_sbref.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-03_sbref.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-01_sbref.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-01_events.tsv',\n '/sub-115/func/sub-115_task-MGT_run-04_bold.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-02_sbref.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-03_bold.nii.gz',\n '/sub-115/func/sub-115_task-MGT_run-04_events.tsv',\n '/sub-010/fmap/sub-010_phasediff.json',\n '/sub-010/fmap/sub-010_magnitude2.nii.gz',\n '/sub-010/fmap/sub-010_phasediff.nii.gz',\n '/sub-010/fmap/sub-010_magnitude1.nii.gz',\n '/sub-010/anat/sub-010_T1w.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-03_sbref.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-01_sbref.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-01_events.tsv',\n '/sub-010/func/sub-010_task-MGT_run-03_bold.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-02_events.tsv',\n '/sub-010/func/sub-010_task-MGT_run-04_bold.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-04_sbref.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-03_events.tsv',\n '/sub-010/func/sub-010_task-MGT_run-02_bold.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-02_sbref.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-01_bold.nii.gz',\n '/sub-010/func/sub-010_task-MGT_run-04_events.tsv',\n '/task-MGT_sbref.json',\n '/sub-026/fmap/sub-026_magnitude2.nii.gz',\n '/sub-026/fmap/sub-026_magnitude1.nii.gz',\n '/sub-026/fmap/sub-026_phasediff.json',\n '/sub-026/fmap/sub-026_phasediff.nii.gz',\n '/sub-026/anat/sub-026_T1w.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-02_events.tsv',\n '/sub-026/func/sub-026_task-MGT_run-04_sbref.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-01_events.tsv',\n '/sub-026/func/sub-026_task-MGT_run-03_events.tsv',\n '/sub-026/func/sub-026_task-MGT_run-04_bold.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-03_bold.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-02_sbref.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-04_events.tsv',\n '/sub-026/func/sub-026_task-MGT_run-01_bold.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-01_sbref.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-02_bold.nii.gz',\n '/sub-026/func/sub-026_task-MGT_run-03_sbref.nii.gz',\n '/sub-092/fmap/sub-092_phasediff.nii.gz',\n '/sub-092/fmap/sub-092_phasediff.json',\n '/sub-092/fmap/sub-092_magnitude2.nii.gz',\n '/sub-092/fmap/sub-092_magnitude1.nii.gz',\n '/sub-092/anat/sub-092_T1w.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-02_events.tsv',\n '/sub-092/func/sub-092_task-MGT_run-02_bold.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-02_sbref.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-03_bold.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-04_events.tsv',\n '/sub-092/func/sub-092_task-MGT_run-04_sbref.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-01_bold.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-04_bold.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-01_events.tsv',\n '/sub-092/func/sub-092_task-MGT_run-03_events.tsv',\n '/sub-092/func/sub-092_task-MGT_run-03_sbref.nii.gz',\n '/sub-092/func/sub-092_task-MGT_run-01_sbref.nii.gz',\n '/sub-099/fmap/sub-099_magnitude1.nii.gz',\n '/sub-099/fmap/sub-099_phasediff.json',\n '/sub-099/fmap/sub-099_phasediff.nii.gz',\n '/sub-099/fmap/sub-099_magnitude2.nii.gz',\n 
'/sub-099/anat/sub-099_T1w.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-04_events.tsv',\n '/sub-099/func/sub-099_task-MGT_run-02_events.tsv',\n '/sub-099/func/sub-099_task-MGT_run-01_sbref.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-03_events.tsv',\n '/sub-099/func/sub-099_task-MGT_run-04_sbref.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-01_events.tsv',\n '/sub-099/func/sub-099_task-MGT_run-02_bold.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-02_sbref.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-04_bold.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-03_bold.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-03_sbref.nii.gz',\n '/sub-099/func/sub-099_task-MGT_run-01_bold.nii.gz',\n '/sub-052/fmap/sub-052_phasediff.json',\n '/sub-052/fmap/sub-052_magnitude2.nii.gz',\n '/sub-052/fmap/sub-052_phasediff.nii.gz',\n '/sub-052/fmap/sub-052_magnitude1.nii.gz',\n '/sub-052/anat/sub-052_T1w.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-01_events.tsv',\n '/sub-052/func/sub-052_task-MGT_run-04_bold.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-02_bold.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-03_bold.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-01_bold.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-02_events.tsv',\n '/sub-052/func/sub-052_task-MGT_run-03_sbref.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-03_events.tsv',\n '/sub-052/func/sub-052_task-MGT_run-04_events.tsv',\n '/sub-052/func/sub-052_task-MGT_run-04_sbref.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-02_sbref.nii.gz',\n '/sub-052/func/sub-052_task-MGT_run-01_sbref.nii.gz',\n '/sub-014/fmap/sub-014_magnitude1.nii.gz',\n '/sub-014/fmap/sub-014_phasediff.json',\n '/sub-014/fmap/sub-014_phasediff.nii.gz',\n '/sub-014/fmap/sub-014_magnitude2.nii.gz',\n '/sub-014/anat/sub-014_T1w.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-04_bold.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-01_events.tsv',\n '/sub-014/func/sub-014_task-MGT_run-03_bold.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-02_sbref.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-04_events.tsv',\n '/sub-014/func/sub-014_task-MGT_run-01_bold.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-03_events.tsv',\n '/sub-014/func/sub-014_task-MGT_run-01_sbref.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-03_sbref.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-02_events.tsv',\n '/sub-014/func/sub-014_task-MGT_run-04_sbref.nii.gz',\n '/sub-014/func/sub-014_task-MGT_run-02_bold.nii.gz',\n '/sub-119/fmap/sub-119_phasediff.nii.gz',\n '/sub-119/fmap/sub-119_magnitude2.nii.gz',\n '/sub-119/fmap/sub-119_magnitude1.nii.gz',\n '/sub-119/fmap/sub-119_phasediff.json',\n '/sub-119/anat/sub-119_T1w.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-04_sbref.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-01_bold.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-03_events.tsv',\n '/sub-119/func/sub-119_task-MGT_run-03_bold.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-01_sbref.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-04_bold.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-01_events.tsv',\n '/sub-119/func/sub-119_task-MGT_run-02_events.tsv',\n '/sub-119/func/sub-119_task-MGT_run-03_sbref.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-02_bold.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-02_sbref.nii.gz',\n '/sub-119/func/sub-119_task-MGT_run-04_events.tsv',\n '/sub-040/fmap/sub-040_magnitude1.nii.gz',\n '/sub-040/fmap/sub-040_magnitude2.nii.gz',\n '/sub-040/fmap/sub-040_phasediff.nii.gz',\n '/sub-040/fmap/sub-040_phasediff.json',\n '/sub-040/anat/sub-040_T1w.nii.gz',\n 
'/sub-040/func/sub-040_task-MGT_run-01_bold.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-04_events.tsv',\n '/sub-040/func/sub-040_task-MGT_run-01_events.tsv',\n '/sub-040/func/sub-040_task-MGT_run-03_events.tsv',\n '/sub-040/func/sub-040_task-MGT_run-03_sbref.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-02_events.tsv',\n '/sub-040/func/sub-040_task-MGT_run-02_bold.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-02_sbref.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-03_bold.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-04_bold.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-01_sbref.nii.gz',\n '/sub-040/func/sub-040_task-MGT_run-04_sbref.nii.gz',\n '/sub-106/fmap/sub-106_magnitude1.nii.gz',\n '/sub-106/fmap/sub-106_phasediff.json',\n '/sub-106/fmap/sub-106_magnitude2.nii.gz',\n '/sub-106/fmap/sub-106_phasediff.nii.gz',\n '/sub-106/anat/sub-106_T1w.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-03_sbref.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-01_sbref.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-03_events.tsv',\n '/sub-106/func/sub-106_task-MGT_run-03_bold.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-02_events.tsv',\n '/sub-106/func/sub-106_task-MGT_run-01_bold.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-04_bold.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-04_sbref.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-02_bold.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-04_events.tsv',\n '/sub-106/func/sub-106_task-MGT_run-02_sbref.nii.gz',\n '/sub-106/func/sub-106_task-MGT_run-01_events.tsv',\n '/sub-006/fmap/sub-006_magnitude1.nii.gz',\n '/sub-006/fmap/sub-006_magnitude2.nii.gz',\n '/sub-006/fmap/sub-006_phasediff.json',\n '/sub-006/fmap/sub-006_phasediff.nii.gz',\n '/sub-006/anat/sub-006_T1w.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-01_events.tsv',\n '/sub-006/func/sub-006_task-MGT_run-03_sbref.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-02_bold.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-02_events.tsv',\n '/sub-006/func/sub-006_task-MGT_run-03_events.tsv',\n '/sub-006/func/sub-006_task-MGT_run-02_sbref.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-01_bold.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-04_bold.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-01_sbref.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-04_events.tsv',\n '/sub-006/func/sub-006_task-MGT_run-04_sbref.nii.gz',\n '/sub-006/func/sub-006_task-MGT_run-03_bold.nii.gz',\n '/sub-037/fmap/sub-037_phasediff.json',\n '/sub-037/fmap/sub-037_phasediff.nii.gz',\n '/sub-037/fmap/sub-037_magnitude2.nii.gz',\n '/sub-037/fmap/sub-037_magnitude1.nii.gz',\n '/sub-037/anat/sub-037_T1w.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-03_sbref.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-02_sbref.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-03_events.tsv',\n '/sub-037/func/sub-037_task-MGT_run-02_bold.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-01_sbref.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-04_sbref.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-01_events.tsv',\n '/sub-037/func/sub-037_task-MGT_run-04_bold.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-03_bold.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-01_bold.nii.gz',\n '/sub-037/func/sub-037_task-MGT_run-04_events.tsv',\n '/sub-037/func/sub-037_task-MGT_run-02_events.tsv',\n '/sub-076/fmap/sub-076_magnitude1.nii.gz',\n '/sub-076/fmap/sub-076_phasediff.nii.gz',\n '/sub-076/fmap/sub-076_magnitude2.nii.gz',\n '/sub-076/fmap/sub-076_phasediff.json',\n '/sub-076/anat/sub-076_T1w.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-01_events.tsv',\n 
'/sub-076/func/sub-076_task-MGT_run-01_sbref.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-03_bold.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-04_sbref.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-02_bold.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-04_events.tsv',\n '/sub-076/func/sub-076_task-MGT_run-02_sbref.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-03_events.tsv',\n '/sub-076/func/sub-076_task-MGT_run-01_bold.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-04_bold.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-03_sbref.nii.gz',\n '/sub-076/func/sub-076_task-MGT_run-02_events.tsv',\n '/sub-088/fmap/sub-088_phasediff.json',\n '/sub-088/fmap/sub-088_magnitude1.nii.gz',\n '/sub-088/fmap/sub-088_magnitude2.nii.gz',\n '/sub-088/fmap/sub-088_phasediff.nii.gz',\n '/sub-088/anat/sub-088_T1w.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-02_bold.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-01_sbref.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-04_events.tsv',\n '/sub-088/func/sub-088_task-MGT_run-03_sbref.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-02_sbref.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-04_bold.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-01_events.tsv',\n '/sub-088/func/sub-088_task-MGT_run-04_sbref.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-02_events.tsv',\n '/sub-088/func/sub-088_task-MGT_run-03_events.tsv',\n '/sub-088/func/sub-088_task-MGT_run-01_bold.nii.gz',\n '/sub-088/func/sub-088_task-MGT_run-03_bold.nii.gz',\n '/sub-064/fmap/sub-064_phasediff.json',\n '/sub-064/fmap/sub-064_magnitude1.nii.gz',\n '/sub-064/fmap/sub-064_phasediff.nii.gz',\n '/sub-064/fmap/sub-064_magnitude2.nii.gz',\n '/sub-064/anat/sub-064_T1w.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-03_events.tsv',\n '/sub-064/func/sub-064_task-MGT_run-04_bold.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-01_bold.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-04_events.tsv',\n '/sub-064/func/sub-064_task-MGT_run-02_bold.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-02_events.tsv',\n '/sub-064/func/sub-064_task-MGT_run-01_events.tsv',\n '/sub-064/func/sub-064_task-MGT_run-01_sbref.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-03_bold.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-03_sbref.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-02_sbref.nii.gz',\n '/sub-064/func/sub-064_task-MGT_run-04_sbref.nii.gz',\n '/sub-082/fmap/sub-082_magnitude2.nii.gz',\n '/sub-082/fmap/sub-082_phasediff.nii.gz',\n '/sub-082/fmap/sub-082_magnitude1.nii.gz',\n '/sub-082/fmap/sub-082_phasediff.json',\n '/sub-082/anat/sub-082_T1w.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-03_events.tsv',\n '/sub-082/func/sub-082_task-MGT_run-04_sbref.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-01_events.tsv',\n '/sub-082/func/sub-082_task-MGT_run-03_bold.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-01_bold.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-04_bold.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-02_sbref.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-02_events.tsv',\n '/sub-082/func/sub-082_task-MGT_run-02_bold.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-03_sbref.nii.gz',\n '/sub-082/func/sub-082_task-MGT_run-04_events.tsv',\n '/sub-082/func/sub-082_task-MGT_run-01_sbref.nii.gz',\n '/sub-074/fmap/sub-074_magnitude2.nii.gz',\n '/sub-074/fmap/sub-074_phasediff.json',\n '/sub-074/fmap/sub-074_magnitude1.nii.gz',\n '/sub-074/fmap/sub-074_phasediff.nii.gz',\n '/sub-074/anat/sub-074_T1w.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-03_sbref.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-04_bold.nii.gz',\n 
'/sub-074/func/sub-074_task-MGT_run-01_sbref.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-02_bold.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-04_events.tsv',\n '/sub-074/func/sub-074_task-MGT_run-03_events.tsv',\n '/sub-074/func/sub-074_task-MGT_run-01_events.tsv',\n '/sub-074/func/sub-074_task-MGT_run-04_sbref.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-03_bold.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-02_sbref.nii.gz',\n '/sub-074/func/sub-074_task-MGT_run-02_events.tsv',\n '/sub-074/func/sub-074_task-MGT_run-01_bold.nii.gz',\n '/T1w.json',\n '/sub-118/fmap/sub-118_magnitude2.nii.gz',\n '/sub-118/fmap/sub-118_phasediff.json',\n '/sub-118/fmap/sub-118_phasediff.nii.gz',\n '/sub-118/fmap/sub-118_magnitude1.nii.gz',\n '/sub-118/anat/sub-118_T1w.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-04_bold.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-04_sbref.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-03_bold.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-03_sbref.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-01_sbref.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-03_events.tsv',\n '/sub-118/func/sub-118_task-MGT_run-04_events.tsv',\n '/sub-118/func/sub-118_task-MGT_run-01_bold.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-01_events.tsv',\n '/sub-118/func/sub-118_task-MGT_run-02_events.tsv',\n '/sub-118/func/sub-118_task-MGT_run-02_sbref.nii.gz',\n '/sub-118/func/sub-118_task-MGT_run-02_bold.nii.gz',\n '/sub-043/fmap/sub-043_phasediff.nii.gz',\n '/sub-043/fmap/sub-043_phasediff.json',\n '/sub-043/fmap/sub-043_magnitude2.nii.gz',\n '/sub-043/fmap/sub-043_magnitude1.nii.gz',\n '/sub-043/anat/sub-043_T1w.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-02_sbref.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-03_bold.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-03_sbref.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-01_sbref.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-04_bold.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-02_events.tsv',\n '/sub-043/func/sub-043_task-MGT_run-04_sbref.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-03_events.tsv',\n '/sub-043/func/sub-043_task-MGT_run-02_bold.nii.gz',\n '/sub-043/func/sub-043_task-MGT_run-04_events.tsv',\n '/sub-043/func/sub-043_task-MGT_run-01_events.tsv',\n '/sub-043/func/sub-043_task-MGT_run-01_bold.nii.gz',\n '/sub-009/fmap/sub-009_phasediff.json',\n '/sub-009/fmap/sub-009_phasediff.nii.gz',\n '/sub-009/fmap/sub-009_magnitude2.nii.gz',\n '/sub-009/fmap/sub-009_magnitude1.nii.gz',\n '/sub-009/anat/sub-009_T1w.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-02_events.tsv',\n '/sub-009/func/sub-009_task-MGT_run-04_events.tsv',\n '/sub-009/func/sub-009_task-MGT_run-02_sbref.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-02_bold.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-03_events.tsv',\n '/sub-009/func/sub-009_task-MGT_run-03_sbref.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-03_bold.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-04_sbref.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-01_sbref.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-01_bold.nii.gz',\n '/sub-009/func/sub-009_task-MGT_run-01_events.tsv',\n '/sub-009/func/sub-009_task-MGT_run-04_bold.nii.gz',\n '/sub-035/fmap/sub-035_phasediff.json',\n '/sub-035/fmap/sub-035_phasediff.nii.gz',\n '/sub-035/fmap/sub-035_magnitude2.nii.gz',\n '/sub-035/fmap/sub-035_magnitude1.nii.gz',\n '/sub-035/anat/sub-035_T1w.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-04_events.tsv',\n '/sub-035/func/sub-035_task-MGT_run-03_bold.nii.gz',\n 
'/sub-035/func/sub-035_task-MGT_run-01_events.tsv',\n '/sub-035/func/sub-035_task-MGT_run-04_bold.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-02_bold.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-01_sbref.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-02_events.tsv',\n '/sub-035/func/sub-035_task-MGT_run-02_sbref.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-03_events.tsv',\n '/sub-035/func/sub-035_task-MGT_run-03_sbref.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-01_bold.nii.gz',\n '/sub-035/func/sub-035_task-MGT_run-04_sbref.nii.gz',\n '/sub-112/fmap/sub-112_magnitude2.nii.gz',\n '/sub-112/fmap/sub-112_magnitude1.nii.gz',\n '/sub-112/fmap/sub-112_phasediff.json',\n '/sub-112/fmap/sub-112_phasediff.nii.gz',\n '/sub-112/anat/sub-112_T1w.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-03_bold.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-01_sbref.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-02_sbref.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-03_sbref.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-04_bold.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-03_events.tsv',\n '/sub-112/func/sub-112_task-MGT_run-04_sbref.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-01_bold.nii.gz',\n '/sub-112/func/sub-112_task-MGT_run-02_events.tsv',\n '/sub-112/func/sub-112_task-MGT_run-04_events.tsv',\n '/sub-112/func/sub-112_task-MGT_run-01_events.tsv',\n '/sub-112/func/sub-112_task-MGT_run-02_bold.nii.gz',\n '/sub-089/fmap/sub-089_phasediff.nii.gz',\n '/sub-089/fmap/sub-089_phasediff.json',\n '/sub-089/fmap/sub-089_magnitude2.nii.gz',\n '/sub-089/fmap/sub-089_magnitude1.nii.gz',\n '/sub-089/anat/sub-089_T1w.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-03_sbref.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-03_events.tsv',\n '/sub-089/func/sub-089_task-MGT_run-03_bold.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-04_events.tsv',\n '/sub-089/func/sub-089_task-MGT_run-01_sbref.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-01_bold.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-02_bold.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-04_bold.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-04_sbref.nii.gz',\n '/sub-089/func/sub-089_task-MGT_run-01_events.tsv',\n '/sub-089/func/sub-089_task-MGT_run-02_events.tsv',\n '/sub-089/func/sub-089_task-MGT_run-02_sbref.nii.gz',\n '/sub-113/fmap/sub-113_phasediff.nii.gz',\n '/sub-113/fmap/sub-113_magnitude2.nii.gz',\n '/sub-113/fmap/sub-113_phasediff.json',\n '/sub-113/fmap/sub-113_magnitude1.nii.gz',\n '/sub-113/anat/sub-113_T1w.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-04_sbref.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-03_events.tsv',\n '/sub-113/func/sub-113_task-MGT_run-02_sbref.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-03_sbref.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-04_events.tsv',\n '/sub-113/func/sub-113_task-MGT_run-01_bold.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-04_bold.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-01_events.tsv',\n '/sub-113/func/sub-113_task-MGT_run-02_bold.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-02_events.tsv',\n '/sub-113/func/sub-113_task-MGT_run-01_sbref.nii.gz',\n '/sub-113/func/sub-113_task-MGT_run-03_bold.nii.gz',\n '/sub-109/fmap/sub-109_magnitude2.nii.gz',\n '/sub-109/fmap/sub-109_phasediff.json',\n '/sub-109/fmap/sub-109_magnitude1.nii.gz',\n '/sub-109/fmap/sub-109_phasediff.nii.gz',\n '/sub-109/anat/sub-109_T1w.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-03_bold.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-01_bold.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-04_bold.nii.gz',\n 
'/sub-109/func/sub-109_task-MGT_run-02_bold.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-02_sbref.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-01_events.tsv',\n '/sub-109/func/sub-109_task-MGT_run-02_events.tsv',\n '/sub-109/func/sub-109_task-MGT_run-03_events.tsv',\n '/sub-109/func/sub-109_task-MGT_run-04_events.tsv',\n '/sub-109/func/sub-109_task-MGT_run-04_sbref.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-03_sbref.nii.gz',\n '/sub-109/func/sub-109_task-MGT_run-01_sbref.nii.gz',\n '/dataset_description.json',\n '/sub-044/fmap/sub-044_phasediff.nii.gz',\n '/sub-044/fmap/sub-044_magnitude1.nii.gz',\n '/sub-044/fmap/sub-044_magnitude2.nii.gz',\n '/sub-044/fmap/sub-044_phasediff.json',\n '/sub-044/anat/sub-044_T1w.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-04_events.tsv',\n '/sub-044/func/sub-044_task-MGT_run-02_bold.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-03_bold.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-04_bold.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-01_bold.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-02_events.tsv',\n '/sub-044/func/sub-044_task-MGT_run-04_sbref.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-03_sbref.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-02_sbref.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-01_sbref.nii.gz',\n '/sub-044/func/sub-044_task-MGT_run-01_events.tsv',\n '/sub-044/func/sub-044_task-MGT_run-03_events.tsv',\n '/sub-002/fmap/sub-002_magnitude2.nii.gz',\n '/sub-002/fmap/sub-002_magnitude1.nii.gz',\n '/sub-002/fmap/sub-002_phasediff.json',\n '/sub-002/fmap/sub-002_phasediff.nii.gz',\n '/sub-002/anat/sub-002_T1w.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-02_bold.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-04_events.tsv',\n '/sub-002/func/sub-002_task-MGT_run-04_bold.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-03_bold.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-01_bold.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-03_sbref.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-01_sbref.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-04_sbref.nii.gz',\n '/sub-002/func/sub-002_task-MGT_run-01_events.tsv',\n '/sub-002/func/sub-002_task-MGT_run-02_events.tsv',\n '/sub-002/func/sub-002_task-MGT_run-03_events.tsv',\n '/sub-002/func/sub-002_task-MGT_run-02_sbref.nii.gz',\n '/sub-085/fmap/sub-085_magnitude1.nii.gz',\n '/sub-085/fmap/sub-085_magnitude2.nii.gz',\n '/sub-085/fmap/sub-085_phasediff.json',\n '/sub-085/fmap/sub-085_phasediff.nii.gz',\n '/sub-085/anat/sub-085_T1w.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-04_sbref.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-01_bold.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-02_events.tsv',\n '/sub-085/func/sub-085_task-MGT_run-02_bold.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-03_bold.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-03_sbref.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-03_events.tsv',\n '/sub-085/func/sub-085_task-MGT_run-01_events.tsv',\n '/sub-085/func/sub-085_task-MGT_run-04_bold.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-02_sbref.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-01_sbref.nii.gz',\n '/sub-085/func/sub-085_task-MGT_run-04_events.tsv',\n '/sub-022/fmap/sub-022_magnitude2.nii.gz',\n '/sub-022/fmap/sub-022_phasediff.nii.gz',\n '/sub-022/fmap/sub-022_magnitude1.nii.gz',\n '/sub-022/fmap/sub-022_phasediff.json',\n '/sub-022/anat/sub-022_T1w.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-04_sbref.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-04_bold.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-01_sbref.nii.gz',\n 
'/sub-022/func/sub-022_task-MGT_run-01_events.tsv',\n '/sub-022/func/sub-022_task-MGT_run-03_events.tsv',\n '/sub-022/func/sub-022_task-MGT_run-03_bold.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-03_sbref.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-01_bold.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-02_bold.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-04_events.tsv',\n '/sub-022/func/sub-022_task-MGT_run-02_sbref.nii.gz',\n '/sub-022/func/sub-022_task-MGT_run-02_events.tsv',\n '/sub-032/fmap/sub-032_magnitude1.nii.gz',\n '/sub-032/fmap/sub-032_phasediff.nii.gz',\n '/sub-032/fmap/sub-032_magnitude2.nii.gz',\n '/sub-032/fmap/sub-032_phasediff.json',\n '/sub-032/anat/sub-032_T1w.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-03_bold.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-04_bold.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-01_events.tsv',\n '/sub-032/func/sub-032_task-MGT_run-02_events.tsv',\n '/sub-032/func/sub-032_task-MGT_run-03_sbref.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-02_bold.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-04_sbref.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-01_bold.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-04_events.tsv',\n '/sub-032/func/sub-032_task-MGT_run-03_events.tsv',\n '/sub-032/func/sub-032_task-MGT_run-01_sbref.nii.gz',\n '/sub-032/func/sub-032_task-MGT_run-02_sbref.nii.gz',\n '/benchmark.py',\n '/sub-056/fmap/sub-056_phasediff.nii.gz',\n '/sub-056/fmap/sub-056_magnitude2.nii.gz',\n '/sub-056/fmap/sub-056_phasediff.json',\n '/sub-056/fmap/sub-056_magnitude1.nii.gz',\n '/sub-056/anat/sub-056_T1w.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-02_events.tsv',\n '/sub-056/func/sub-056_task-MGT_run-01_bold.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-03_events.tsv',\n '/sub-056/func/sub-056_task-MGT_run-03_sbref.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-03_bold.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-04_bold.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-01_sbref.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-01_events.tsv',\n '/sub-056/func/sub-056_task-MGT_run-02_bold.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-04_sbref.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-02_sbref.nii.gz',\n '/sub-056/func/sub-056_task-MGT_run-04_events.tsv',\n '/sub-049/fmap/sub-049_phasediff.nii.gz',\n '/sub-049/fmap/sub-049_magnitude1.nii.gz',\n '/sub-049/fmap/sub-049_phasediff.json',\n '/sub-049/fmap/sub-049_magnitude2.nii.gz',\n '/sub-049/anat/sub-049_T1w.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-02_events.tsv',\n '/sub-049/func/sub-049_task-MGT_run-01_events.tsv',\n '/sub-049/func/sub-049_task-MGT_run-03_events.tsv',\n '/sub-049/func/sub-049_task-MGT_run-01_sbref.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-03_sbref.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-01_bold.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-02_bold.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-02_sbref.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-04_sbref.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-04_events.tsv',\n '/sub-049/func/sub-049_task-MGT_run-04_bold.nii.gz',\n '/sub-049/func/sub-049_task-MGT_run-03_bold.nii.gz',\n '/sub-123/fmap/sub-123_magnitude1.nii.gz',\n '/sub-123/fmap/sub-123_phasediff.json',\n '/sub-123/fmap/sub-123_phasediff.nii.gz',\n '/sub-123/fmap/sub-123_magnitude2.nii.gz',\n '/sub-123/anat/sub-123_T1w.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-01_bold.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-04_events.tsv',\n '/sub-123/func/sub-123_task-MGT_run-04_sbref.nii.gz',\n 
'/sub-123/func/sub-123_task-MGT_run-02_sbref.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-02_events.tsv',\n '/sub-123/func/sub-123_task-MGT_run-03_events.tsv',\n '/sub-123/func/sub-123_task-MGT_run-02_bold.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-01_sbref.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-03_bold.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-04_bold.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-03_sbref.nii.gz',\n '/sub-123/func/sub-123_task-MGT_run-01_events.tsv',\n '/sub-066/fmap/sub-066_phasediff.nii.gz',\n '/sub-066/fmap/sub-066_phasediff.json',\n '/sub-066/fmap/sub-066_magnitude2.nii.gz',\n '/sub-066/fmap/sub-066_magnitude1.nii.gz',\n '/sub-066/anat/sub-066_T1w.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-04_sbref.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-01_bold.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-02_events.tsv',\n '/sub-066/func/sub-066_task-MGT_run-03_sbref.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-03_bold.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-02_sbref.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-01_sbref.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-01_events.tsv',\n '/sub-066/func/sub-066_task-MGT_run-02_bold.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-04_bold.nii.gz',\n '/sub-066/func/sub-066_task-MGT_run-03_events.tsv',\n '/sub-066/func/sub-066_task-MGT_run-04_events.tsv',\n '/sub-067/fmap/sub-067_phasediff.json',\n '/sub-067/fmap/sub-067_phasediff.nii.gz',\n '/sub-067/fmap/sub-067_magnitude2.nii.gz',\n '/sub-067/fmap/sub-067_magnitude1.nii.gz',\n '/sub-067/anat/sub-067_T1w.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-03_sbref.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-03_events.tsv',\n '/sub-067/func/sub-067_task-MGT_run-01_events.tsv',\n '/sub-067/func/sub-067_task-MGT_run-03_bold.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-04_events.tsv',\n '/sub-067/func/sub-067_task-MGT_run-01_sbref.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-01_bold.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-02_sbref.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-02_bold.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-04_bold.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-04_sbref.nii.gz',\n '/sub-067/func/sub-067_task-MGT_run-02_events.tsv',\n '/README',\n '/sub-003/fmap/sub-003_phasediff.json',\n '/sub-003/fmap/sub-003_magnitude2.nii.gz',\n '/sub-003/fmap/sub-003_magnitude1.nii.gz',\n '/sub-003/fmap/sub-003_phasediff.nii.gz',\n '/sub-003/anat/sub-003_T1w.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-03_events.tsv',\n '/sub-003/func/sub-003_task-MGT_run-04_sbref.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-03_bold.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-02_events.tsv',\n '/sub-003/func/sub-003_task-MGT_run-04_bold.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-01_bold.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-02_sbref.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-04_events.tsv',\n '/sub-003/func/sub-003_task-MGT_run-01_events.tsv',\n '/sub-003/func/sub-003_task-MGT_run-02_bold.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-01_sbref.nii.gz',\n '/sub-003/func/sub-003_task-MGT_run-03_sbref.nii.gz',\n '/sub-068/fmap/sub-068_magnitude1.nii.gz',\n '/sub-068/fmap/sub-068_phasediff.nii.gz',\n '/sub-068/fmap/sub-068_phasediff.json',\n '/sub-068/fmap/sub-068_magnitude2.nii.gz',\n '/sub-068/anat/sub-068_T1w.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-04_sbref.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-03_bold.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-01_sbref.nii.gz',\n 
'/sub-068/func/sub-068_task-MGT_run-02_bold.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-02_sbref.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-01_events.tsv',\n '/sub-068/func/sub-068_task-MGT_run-04_events.tsv',\n '/sub-068/func/sub-068_task-MGT_run-01_bold.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-03_sbref.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-04_bold.nii.gz',\n '/sub-068/func/sub-068_task-MGT_run-02_events.tsv',\n '/sub-068/func/sub-068_task-MGT_run-03_events.tsv',\n '/sub-011/fmap/sub-011_phasediff.nii.gz',\n '/sub-011/fmap/sub-011_magnitude1.nii.gz',\n '/sub-011/fmap/sub-011_magnitude2.nii.gz',\n '/sub-011/fmap/sub-011_phasediff.json',\n '/sub-011/anat/sub-011_T1w.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-04_sbref.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-03_sbref.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-02_events.tsv',\n '/sub-011/func/sub-011_task-MGT_run-01_sbref.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-02_bold.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-03_events.tsv',\n '/sub-011/func/sub-011_task-MGT_run-02_sbref.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-03_bold.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-04_bold.nii.gz',\n '/sub-011/func/sub-011_task-MGT_run-04_events.tsv',\n '/sub-011/func/sub-011_task-MGT_run-01_events.tsv',\n '/sub-011/func/sub-011_task-MGT_run-01_bold.nii.gz',\n '/sub-062/fmap/sub-062_phasediff.nii.gz',\n '/sub-062/fmap/sub-062_phasediff.json',\n '/sub-062/fmap/sub-062_magnitude2.nii.gz',\n '/sub-062/fmap/sub-062_magnitude1.nii.gz',\n '/sub-062/anat/sub-062_T1w.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-03_events.tsv',\n '/sub-062/func/sub-062_task-MGT_run-02_bold.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-01_bold.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-04_bold.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-02_events.tsv',\n '/sub-062/func/sub-062_task-MGT_run-04_sbref.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-02_sbref.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-04_events.tsv',\n '/sub-062/func/sub-062_task-MGT_run-03_sbref.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-01_sbref.nii.gz',\n '/sub-062/func/sub-062_task-MGT_run-01_events.tsv',\n '/sub-062/func/sub-062_task-MGT_run-03_bold.nii.gz',\n '/sub-041/fmap/sub-041_magnitude1.nii.gz',\n '/sub-041/fmap/sub-041_phasediff.nii.gz',\n '/sub-041/fmap/sub-041_magnitude2.nii.gz',\n '/sub-041/fmap/sub-041_phasediff.json',\n '/sub-041/anat/sub-041_T1w.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-04_bold.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-01_sbref.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-02_bold.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-02_events.tsv',\n '/sub-041/func/sub-041_task-MGT_run-04_events.tsv',\n '/sub-041/func/sub-041_task-MGT_run-01_events.tsv',\n '/sub-041/func/sub-041_task-MGT_run-03_events.tsv',\n '/sub-041/func/sub-041_task-MGT_run-04_sbref.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-03_sbref.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-01_bold.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-03_bold.nii.gz',\n '/sub-041/func/sub-041_task-MGT_run-02_sbref.nii.gz',\n '/sub-084/fmap/sub-084_phasediff.nii.gz',\n '/sub-084/fmap/sub-084_magnitude2.nii.gz',\n '/sub-084/fmap/sub-084_magnitude1.nii.gz',\n '/sub-084/fmap/sub-084_phasediff.json',\n '/sub-084/anat/sub-084_T1w.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-03_sbref.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-04_bold.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-02_events.tsv',\n '/sub-084/func/sub-084_task-MGT_run-03_events.tsv',\n 
'/sub-084/func/sub-084_task-MGT_run-03_bold.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-04_sbref.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-02_bold.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-01_sbref.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-02_sbref.nii.gz',\n '/sub-084/func/sub-084_task-MGT_run-01_events.tsv',\n '/sub-084/func/sub-084_task-MGT_run-04_events.tsv',\n '/sub-084/func/sub-084_task-MGT_run-01_bold.nii.gz',\n '/sub-095/fmap/sub-095_phasediff.json',\n '/sub-095/fmap/sub-095_magnitude1.nii.gz',\n '/sub-095/fmap/sub-095_phasediff.nii.gz',\n '/sub-095/fmap/sub-095_magnitude2.nii.gz',\n '/sub-095/anat/sub-095_T1w.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-02_sbref.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-03_events.tsv',\n '/sub-095/func/sub-095_task-MGT_run-04_bold.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-04_sbref.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-04_events.tsv',\n '/sub-095/func/sub-095_task-MGT_run-03_sbref.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-01_events.tsv',\n '/sub-095/func/sub-095_task-MGT_run-02_events.tsv',\n '/sub-095/func/sub-095_task-MGT_run-03_bold.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-01_sbref.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-02_bold.nii.gz',\n '/sub-095/func/sub-095_task-MGT_run-01_bold.nii.gz',\n '/sub-098/fmap/sub-098_magnitude2.nii.gz',\n '/sub-098/fmap/sub-098_phasediff.nii.gz',\n '/sub-098/fmap/sub-098_phasediff.json',\n '/sub-098/fmap/sub-098_magnitude1.nii.gz',\n '/sub-098/anat/sub-098_T1w.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-04_events.tsv',\n '/sub-098/func/sub-098_task-MGT_run-02_bold.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-04_sbref.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-03_events.tsv',\n '/sub-098/func/sub-098_task-MGT_run-01_sbref.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-02_sbref.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-02_events.tsv',\n '/sub-098/func/sub-098_task-MGT_run-03_bold.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-01_events.tsv',\n '/sub-098/func/sub-098_task-MGT_run-04_bold.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-03_sbref.nii.gz',\n '/sub-098/func/sub-098_task-MGT_run-01_bold.nii.gz',\n '/sub-096/fmap/sub-096_phasediff.nii.gz',\n '/sub-096/fmap/sub-096_magnitude1.nii.gz',\n '/sub-096/fmap/sub-096_magnitude2.nii.gz',\n '/sub-096/fmap/sub-096_phasediff.json',\n '/sub-096/anat/sub-096_T1w.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-03_sbref.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-04_events.tsv',\n '/sub-096/func/sub-096_task-MGT_run-02_bold.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-04_sbref.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-03_events.tsv',\n '/sub-096/func/sub-096_task-MGT_run-04_bold.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-03_bold.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-01_events.tsv',\n '/sub-096/func/sub-096_task-MGT_run-02_events.tsv',\n '/sub-096/func/sub-096_task-MGT_run-01_bold.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-02_sbref.nii.gz',\n '/sub-096/func/sub-096_task-MGT_run-01_sbref.nii.gz',\n '/sub-075/fmap/sub-075_magnitude1.nii.gz',\n '/sub-075/fmap/sub-075_phasediff.json',\n '/sub-075/fmap/sub-075_phasediff.nii.gz',\n '/sub-075/fmap/sub-075_magnitude2.nii.gz',\n '/sub-075/anat/sub-075_T1w.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-02_bold.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-04_events.tsv',\n '/sub-075/func/sub-075_task-MGT_run-04_sbref.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-03_events.tsv',\n '/sub-075/func/sub-075_task-MGT_run-01_events.tsv',\n 
'/sub-075/func/sub-075_task-MGT_run-03_bold.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-02_events.tsv',\n '/sub-075/func/sub-075_task-MGT_run-01_sbref.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-03_sbref.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-02_sbref.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-04_bold.nii.gz',\n '/sub-075/func/sub-075_task-MGT_run-01_bold.nii.gz',\n '/sub-033/fmap/sub-033_magnitude1.nii.gz',\n '/sub-033/fmap/sub-033_phasediff.json',\n '/sub-033/fmap/sub-033_magnitude2.nii.gz',\n '/sub-033/fmap/sub-033_phasediff.nii.gz',\n '/sub-033/anat/sub-033_T1w.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-01_sbref.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-03_bold.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-02_events.tsv',\n '/sub-033/func/sub-033_task-MGT_run-04_bold.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-04_sbref.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-01_events.tsv',\n '/sub-033/func/sub-033_task-MGT_run-03_sbref.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-02_bold.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-03_events.tsv',\n '/sub-033/func/sub-033_task-MGT_run-02_sbref.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-01_bold.nii.gz',\n '/sub-033/func/sub-033_task-MGT_run-04_events.tsv',\n '/sub-083/fmap/sub-083_phasediff.json',\n '/sub-083/fmap/sub-083_magnitude1.nii.gz',\n '/sub-083/fmap/sub-083_phasediff.nii.gz',\n '/sub-083/fmap/sub-083_magnitude2.nii.gz',\n '/sub-083/anat/sub-083_T1w.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-04_sbref.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-01_events.tsv',\n '/sub-083/func/sub-083_task-MGT_run-03_bold.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-04_events.tsv',\n '/sub-083/func/sub-083_task-MGT_run-02_bold.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-01_sbref.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-04_bold.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-03_sbref.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-01_bold.nii.gz',\n '/sub-083/func/sub-083_task-MGT_run-02_events.tsv',\n '/sub-083/func/sub-083_task-MGT_run-03_events.tsv',\n '/sub-083/func/sub-083_task-MGT_run-02_sbref.nii.gz',\n '/sub-019/fmap/sub-019_phasediff.nii.gz',\n '/sub-019/fmap/sub-019_phasediff.json',\n '/sub-019/fmap/sub-019_magnitude2.nii.gz',\n '/sub-019/fmap/sub-019_magnitude1.nii.gz',\n '/sub-019/anat/sub-019_T1w.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-02_bold.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-03_events.tsv',\n '/sub-019/func/sub-019_task-MGT_run-03_bold.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-01_bold.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-03_sbref.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-02_sbref.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-02_events.tsv',\n '/sub-019/func/sub-019_task-MGT_run-01_sbref.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-01_events.tsv',\n '/sub-019/func/sub-019_task-MGT_run-04_events.tsv',\n '/sub-019/func/sub-019_task-MGT_run-04_sbref.nii.gz',\n '/sub-019/func/sub-019_task-MGT_run-04_bold.nii.gz',\n '/sub-124/fmap/sub-124_magnitude2.nii.gz',\n '/sub-124/fmap/sub-124_phasediff.nii.gz',\n '/sub-124/fmap/sub-124_magnitude1.nii.gz',\n '/sub-124/fmap/sub-124_phasediff.json',\n '/sub-124/anat/sub-124_T1w.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-03_bold.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-04_bold.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-03_events.tsv',\n '/sub-124/func/sub-124_task-MGT_run-02_sbref.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-02_events.tsv',\n '/sub-124/func/sub-124_task-MGT_run-01_events.tsv',\n 
'/sub-124/func/sub-124_task-MGT_run-01_bold.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-04_events.tsv',\n '/sub-124/func/sub-124_task-MGT_run-02_bold.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-03_sbref.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-04_sbref.nii.gz',\n '/sub-124/func/sub-124_task-MGT_run-01_sbref.nii.gz',\n '/sub-055/fmap/sub-055_magnitude2.nii.gz',\n '/sub-055/fmap/sub-055_magnitude1.nii.gz',\n '/sub-055/fmap/sub-055_phasediff.json',\n '/sub-055/fmap/sub-055_phasediff.nii.gz',\n '/sub-055/anat/sub-055_T1w.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-01_sbref.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-03_bold.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-01_bold.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-04_bold.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-02_events.tsv',\n '/sub-055/func/sub-055_task-MGT_run-02_sbref.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-02_bold.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-04_sbref.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-03_sbref.nii.gz',\n '/sub-055/func/sub-055_task-MGT_run-01_events.tsv',\n '/sub-055/func/sub-055_task-MGT_run-04_events.tsv',\n '/sub-055/func/sub-055_task-MGT_run-03_events.tsv',\n '/sub-057/fmap/sub-057_phasediff.json',\n '/sub-057/fmap/sub-057_magnitude2.nii.gz',\n '/sub-057/fmap/sub-057_magnitude1.nii.gz',\n '/sub-057/fmap/sub-057_phasediff.nii.gz',\n '/sub-057/anat/sub-057_T1w.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-03_sbref.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-02_events.tsv',\n '/sub-057/func/sub-057_task-MGT_run-04_sbref.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-03_bold.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-02_bold.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-04_bold.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-04_events.tsv',\n '/sub-057/func/sub-057_task-MGT_run-01_events.tsv',\n '/sub-057/func/sub-057_task-MGT_run-02_sbref.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-01_sbref.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-01_bold.nii.gz',\n '/sub-057/func/sub-057_task-MGT_run-03_events.tsv',\n '/sub-071/fmap/sub-071_magnitude2.nii.gz',\n '/sub-071/fmap/sub-071_phasediff.json',\n '/sub-071/fmap/sub-071_phasediff.nii.gz',\n '/sub-071/fmap/sub-071_magnitude1.nii.gz',\n '/sub-071/anat/sub-071_T1w.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-04_bold.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-03_sbref.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-04_sbref.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-01_events.tsv',\n '/sub-071/func/sub-071_task-MGT_run-02_bold.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-03_bold.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-04_events.tsv',\n '/sub-071/func/sub-071_task-MGT_run-02_sbref.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-02_events.tsv',\n '/sub-071/func/sub-071_task-MGT_run-01_sbref.nii.gz',\n '/sub-071/func/sub-071_task-MGT_run-03_events.tsv',\n '/sub-071/func/sub-071_task-MGT_run-01_bold.nii.gz',\n '/sub-051/fmap/sub-051_magnitude1.nii.gz',\n '/sub-051/fmap/sub-051_magnitude2.nii.gz',\n '/sub-051/fmap/sub-051_phasediff.nii.gz',\n '/sub-051/fmap/sub-051_phasediff.json',\n '/sub-051/anat/sub-051_T1w.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-04_events.tsv',\n '/sub-051/func/sub-051_task-MGT_run-04_bold.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-04_sbref.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-02_sbref.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-01_sbref.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-01_bold.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-01_events.tsv',\n 
'/sub-051/func/sub-051_task-MGT_run-03_bold.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-02_bold.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-03_events.tsv',\n '/sub-051/func/sub-051_task-MGT_run-03_sbref.nii.gz',\n '/sub-051/func/sub-051_task-MGT_run-02_events.tsv',\n '/sub-072/fmap/sub-072_phasediff.nii.gz',\n '/sub-072/fmap/sub-072_magnitude1.nii.gz',\n '/sub-072/fmap/sub-072_magnitude2.nii.gz',\n '/sub-072/fmap/sub-072_phasediff.json',\n '/sub-072/anat/sub-072_T1w.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-03_sbref.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-04_events.tsv',\n '/sub-072/func/sub-072_task-MGT_run-04_bold.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-01_sbref.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-04_sbref.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-02_events.tsv',\n '/sub-072/func/sub-072_task-MGT_run-03_bold.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-01_bold.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-03_events.tsv',\n '/sub-072/func/sub-072_task-MGT_run-01_events.tsv',\n '/sub-072/func/sub-072_task-MGT_run-02_sbref.nii.gz',\n '/sub-072/func/sub-072_task-MGT_run-02_bold.nii.gz',\n '/sub-004/fmap/sub-004_phasediff.nii.gz',\n '/sub-004/fmap/sub-004_magnitude1.nii.gz',\n '/sub-004/fmap/sub-004_phasediff.json',\n '/sub-004/fmap/sub-004_magnitude2.nii.gz',\n '/sub-004/anat/sub-004_T1w.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-02_sbref.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-04_sbref.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-03_bold.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-02_bold.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-04_events.tsv',\n '/sub-004/func/sub-004_task-MGT_run-01_events.tsv',\n '/sub-004/func/sub-004_task-MGT_run-03_sbref.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-04_bold.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-01_bold.nii.gz',\n '/sub-004/func/sub-004_task-MGT_run-03_events.tsv',\n '/sub-004/func/sub-004_task-MGT_run-02_events.tsv',\n '/sub-004/func/sub-004_task-MGT_run-01_sbref.nii.gz',\n '/sub-046/fmap/sub-046_magnitude2.nii.gz',\n '/sub-046/fmap/sub-046_phasediff.json',\n '/sub-046/fmap/sub-046_phasediff.nii.gz',\n '/sub-046/fmap/sub-046_magnitude1.nii.gz',\n '/sub-046/anat/sub-046_T1w.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-01_sbref.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-02_sbref.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-02_bold.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-02_events.tsv',\n '/sub-046/func/sub-046_task-MGT_run-03_events.tsv',\n '/sub-046/func/sub-046_task-MGT_run-04_bold.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-04_sbref.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-01_events.tsv',\n '/sub-046/func/sub-046_task-MGT_run-03_bold.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-04_events.tsv',\n '/sub-046/func/sub-046_task-MGT_run-01_bold.nii.gz',\n '/sub-046/func/sub-046_task-MGT_run-03_sbref.nii.gz',\n '/sub-104/fmap/sub-104_magnitude1.nii.gz',\n '/sub-104/fmap/sub-104_phasediff.json',\n '/sub-104/fmap/sub-104_phasediff.nii.gz',\n '/sub-104/fmap/sub-104_magnitude2.nii.gz',\n '/sub-104/anat/sub-104_T1w.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-04_bold.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-04_sbref.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-01_bold.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-02_events.tsv',\n '/sub-104/func/sub-104_task-MGT_run-02_sbref.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-04_events.tsv',\n '/sub-104/func/sub-104_task-MGT_run-01_sbref.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-03_events.tsv',\n 
'/sub-104/func/sub-104_task-MGT_run-03_sbref.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-02_bold.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-03_bold.nii.gz',\n '/sub-104/func/sub-104_task-MGT_run-01_events.tsv',\n '/sub-024/fmap/sub-024_magnitude2.nii.gz',\n '/sub-024/fmap/sub-024_phasediff.nii.gz',\n '/sub-024/fmap/sub-024_magnitude1.nii.gz',\n '/sub-024/fmap/sub-024_phasediff.json',\n '/sub-024/anat/sub-024_T1w.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-02_bold.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-04_events.tsv',\n '/sub-024/func/sub-024_task-MGT_run-04_sbref.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-02_sbref.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-03_bold.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-03_events.tsv',\n '/sub-024/func/sub-024_task-MGT_run-04_bold.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-01_sbref.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-01_bold.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-02_events.tsv',\n '/sub-024/func/sub-024_task-MGT_run-03_sbref.nii.gz',\n '/sub-024/func/sub-024_task-MGT_run-01_events.tsv',\n '/sub-029/fmap/sub-029_phasediff.json',\n '/sub-029/fmap/sub-029_magnitude2.nii.gz',\n '/sub-029/fmap/sub-029_phasediff.nii.gz',\n '/sub-029/fmap/sub-029_magnitude1.nii.gz',\n '/sub-029/anat/sub-029_T1w.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-03_bold.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-04_sbref.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-02_bold.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-03_sbref.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-04_events.tsv',\n '/sub-029/func/sub-029_task-MGT_run-04_bold.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-01_bold.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-01_events.tsv',\n '/sub-029/func/sub-029_task-MGT_run-02_sbref.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-01_sbref.nii.gz',\n '/sub-029/func/sub-029_task-MGT_run-02_events.tsv',\n '/sub-029/func/sub-029_task-MGT_run-03_events.tsv',\n '/sub-100/fmap/sub-100_magnitude2.nii.gz',\n '/sub-100/fmap/sub-100_phasediff.nii.gz',\n '/sub-100/fmap/sub-100_magnitude1.nii.gz',\n '/sub-100/fmap/sub-100_phasediff.json',\n '/sub-100/anat/sub-100_T1w.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-02_bold.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-03_sbref.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-04_sbref.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-02_events.tsv',\n '/sub-100/func/sub-100_task-MGT_run-01_bold.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-03_bold.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-02_sbref.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-04_bold.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-01_sbref.nii.gz',\n '/sub-100/func/sub-100_task-MGT_run-04_events.tsv',\n '/sub-100/func/sub-100_task-MGT_run-03_events.tsv',\n '/sub-100/func/sub-100_task-MGT_run-01_events.tsv',\n '/sub-017/fmap/sub-017_magnitude2.nii.gz',\n '/sub-017/fmap/sub-017_magnitude1.nii.gz',\n '/sub-017/fmap/sub-017_phasediff.json',\n '/sub-017/fmap/sub-017_phasediff.nii.gz',\n '/sub-017/anat/sub-017_T1w.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-04_events.tsv',\n '/sub-017/func/sub-017_task-MGT_run-04_bold.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-03_bold.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-02_bold.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-02_sbref.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-02_events.tsv',\n '/sub-017/func/sub-017_task-MGT_run-01_events.tsv',\n '/sub-017/func/sub-017_task-MGT_run-03_events.tsv',\n '/sub-017/func/sub-017_task-MGT_run-01_sbref.nii.gz',\n 
'/sub-017/func/sub-017_task-MGT_run-03_sbref.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-04_sbref.nii.gz',\n '/sub-017/func/sub-017_task-MGT_run-01_bold.nii.gz',\n '/sub-108/fmap/sub-108_phasediff.nii.gz',\n '/sub-108/fmap/sub-108_magnitude1.nii.gz',\n '/sub-108/fmap/sub-108_phasediff.json',\n '/sub-108/fmap/sub-108_magnitude2.nii.gz',\n '/sub-108/anat/sub-108_T1w.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-03_bold.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-03_events.tsv',\n '/sub-108/func/sub-108_task-MGT_run-04_events.tsv',\n '/sub-108/func/sub-108_task-MGT_run-03_sbref.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-02_events.tsv',\n '/sub-108/func/sub-108_task-MGT_run-04_sbref.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-01_events.tsv',\n '/sub-108/func/sub-108_task-MGT_run-04_bold.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-01_bold.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-02_sbref.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-01_sbref.nii.gz',\n '/sub-108/func/sub-108_task-MGT_run-02_bold.nii.gz',\n '/sub-120/fmap/sub-120_magnitude1.nii.gz',\n '/sub-120/fmap/sub-120_phasediff.json',\n '/sub-120/fmap/sub-120_magnitude2.nii.gz',\n '/sub-120/fmap/sub-120_phasediff.nii.gz',\n '/sub-120/anat/sub-120_T1w.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-01_sbref.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-01_bold.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-04_bold.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-02_sbref.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-03_bold.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-01_events.tsv',\n '/sub-120/func/sub-120_task-MGT_run-02_bold.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-04_events.tsv',\n '/sub-120/func/sub-120_task-MGT_run-04_sbref.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-02_events.tsv',\n '/sub-120/func/sub-120_task-MGT_run-03_sbref.nii.gz',\n '/sub-120/func/sub-120_task-MGT_run-03_events.tsv',\n '/sub-045/fmap/sub-045_phasediff.nii.gz',\n '/sub-045/fmap/sub-045_phasediff.json',\n '/sub-045/fmap/sub-045_magnitude1.nii.gz',\n '/sub-045/fmap/sub-045_magnitude2.nii.gz',\n '/sub-045/anat/sub-045_T1w.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-03_sbref.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-04_events.tsv',\n '/sub-045/func/sub-045_task-MGT_run-04_sbref.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-02_events.tsv',\n '/sub-045/func/sub-045_task-MGT_run-04_bold.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-02_sbref.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-03_bold.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-01_bold.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-01_events.tsv',\n '/sub-045/func/sub-045_task-MGT_run-02_bold.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-01_sbref.nii.gz',\n '/sub-045/func/sub-045_task-MGT_run-03_events.tsv',\n '/sub-060/fmap/sub-060_phasediff.json',\n '/sub-060/fmap/sub-060_magnitude2.nii.gz',\n '/sub-060/fmap/sub-060_magnitude1.nii.gz',\n '/sub-060/fmap/sub-060_phasediff.nii.gz',\n '/sub-060/anat/sub-060_T1w.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-03_events.tsv',\n '/sub-060/func/sub-060_task-MGT_run-04_events.tsv',\n '/sub-060/func/sub-060_task-MGT_run-01_bold.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-04_sbref.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-02_bold.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-01_sbref.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-02_events.tsv',\n '/sub-060/func/sub-060_task-MGT_run-03_bold.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-03_sbref.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-02_sbref.nii.gz',\n 
'/sub-060/func/sub-060_task-MGT_run-04_bold.nii.gz',\n '/sub-060/func/sub-060_task-MGT_run-01_events.tsv',\n '/sub-050/fmap/sub-050_phasediff.nii.gz',\n '/sub-050/fmap/sub-050_phasediff.json',\n '/sub-050/fmap/sub-050_magnitude1.nii.gz',\n '/sub-050/fmap/sub-050_magnitude2.nii.gz',\n '/sub-050/anat/sub-050_T1w.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-03_sbref.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-02_events.tsv',\n '/sub-050/func/sub-050_task-MGT_run-01_bold.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-04_sbref.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-04_bold.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-03_events.tsv',\n '/sub-050/func/sub-050_task-MGT_run-01_sbref.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-01_events.tsv',\n '/sub-050/func/sub-050_task-MGT_run-02_sbref.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-02_bold.nii.gz',\n '/sub-050/func/sub-050_task-MGT_run-04_events.tsv',\n '/sub-050/func/sub-050_task-MGT_run-03_bold.nii.gz',\n '/sub-107/fmap/sub-107_magnitude2.nii.gz',\n '/sub-107/fmap/sub-107_magnitude1.nii.gz',\n '/sub-107/fmap/sub-107_phasediff.json',\n '/sub-107/fmap/sub-107_phasediff.nii.gz',\n '/sub-107/anat/sub-107_T1w.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-03_events.tsv',\n '/sub-107/func/sub-107_task-MGT_run-04_sbref.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-04_events.tsv',\n '/sub-107/func/sub-107_task-MGT_run-01_bold.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-04_bold.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-02_bold.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-01_sbref.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-03_sbref.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-02_sbref.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-01_events.tsv',\n '/sub-107/func/sub-107_task-MGT_run-03_bold.nii.gz',\n '/sub-107/func/sub-107_task-MGT_run-02_events.tsv',\n]\n\n// EEG\nexport const ds002718 = [\n '/sub-005/eeg/sub-005_task-FaceRecognition_events.json',\n '/sub-005/eeg/sub-005_task-FaceRecognition_channels.tsv',\n '/sub-005/eeg/sub-005_task-FaceRecognition_electrodes.tsv',\n '/sub-005/eeg/sub-005_task-FaceRecognition_eeg.set',\n '/sub-005/eeg/sub-005_task-FaceRecognition_events.tsv',\n '/sub-005/eeg/sub-005_task-FaceRecognition_coordsystem.json',\n '/sub-005/eeg/sub-005_task-FaceRecognition_eeg.json',\n '/sub-005/anat/sub-005_mod-T1w_defacemask.nii.gz',\n '/CHANGES',\n '/sub-008/eeg/sub-008_task-FaceRecognition_electrodes.tsv',\n '/sub-008/eeg/sub-008_task-FaceRecognition_eeg.set',\n '/sub-008/eeg/sub-008_task-FaceRecognition_coordsystem.json',\n '/sub-008/eeg/sub-008_task-FaceRecognition_eeg.json',\n '/sub-008/eeg/sub-008_task-FaceRecognition_events.json',\n '/sub-008/eeg/sub-008_task-FaceRecognition_channels.tsv',\n '/sub-008/eeg/sub-008_task-FaceRecognition_events.tsv',\n '/sub-008/anat/sub-008_mod-T1w_defacemask.nii.gz',\n '/participants.json',\n '/participants.tsv',\n '/sub-015/eeg/sub-015_task-FaceRecognition_events.json',\n '/sub-015/eeg/sub-015_task-FaceRecognition_electrodes.tsv',\n '/sub-015/eeg/sub-015_task-FaceRecognition_coordsystem.json',\n '/sub-015/eeg/sub-015_task-FaceRecognition_eeg.set',\n '/sub-015/eeg/sub-015_task-FaceRecognition_channels.tsv',\n '/sub-015/eeg/sub-015_task-FaceRecognition_eeg.json',\n '/sub-015/eeg/sub-015_task-FaceRecognition_events.tsv',\n '/sub-015/anat/sub-015_mod-T1w_defacemask.nii.gz',\n '/sub-007/eeg/sub-007_task-FaceRecognition_eeg.json',\n '/sub-007/eeg/sub-007_task-FaceRecognition_coordsystem.json',\n '/sub-007/eeg/sub-007_task-FaceRecognition_channels.tsv',\n 
'/sub-007/eeg/sub-007_task-FaceRecognition_electrodes.tsv',\n '/sub-007/eeg/sub-007_task-FaceRecognition_eeg.set',\n '/sub-007/eeg/sub-007_task-FaceRecognition_events.json',\n '/sub-007/eeg/sub-007_task-FaceRecognition_events.tsv',\n '/sub-007/anat/sub-007_mod-T1w_defacemask.nii.gz',\n '/sub-016/eeg/sub-016_task-FaceRecognition_coordsystem.json',\n '/sub-016/eeg/sub-016_task-FaceRecognition_channels.tsv',\n '/sub-016/eeg/sub-016_task-FaceRecognition_eeg.json',\n '/sub-016/eeg/sub-016_task-FaceRecognition_events.json',\n '/sub-016/eeg/sub-016_task-FaceRecognition_events.tsv',\n '/sub-016/eeg/sub-016_task-FaceRecognition_electrodes.tsv',\n '/sub-016/eeg/sub-016_task-FaceRecognition_eeg.set',\n '/sub-016/anat/sub-016_mod-T1w_defacemask.nii.gz',\n '/sub-012/eeg/sub-012_task-FaceRecognition_events.json',\n '/sub-012/eeg/sub-012_task-FaceRecognition_eeg.set',\n '/sub-012/eeg/sub-012_task-FaceRecognition_eeg.json',\n '/sub-012/eeg/sub-012_task-FaceRecognition_events.tsv',\n '/sub-012/eeg/sub-012_task-FaceRecognition_coordsystem.json',\n '/sub-012/eeg/sub-012_task-FaceRecognition_electrodes.tsv',\n '/sub-012/eeg/sub-012_task-FaceRecognition_channels.tsv',\n '/sub-012/anat/sub-012_mod-T1w_defacemask.nii.gz',\n '/sub-010/eeg/sub-010_task-FaceRecognition_eeg.set',\n '/sub-010/eeg/sub-010_task-FaceRecognition_eeg.json',\n '/sub-010/eeg/sub-010_task-FaceRecognition_events.json',\n '/sub-010/eeg/sub-010_task-FaceRecognition_coordsystem.json',\n '/sub-010/eeg/sub-010_task-FaceRecognition_events.tsv',\n '/sub-010/eeg/sub-010_task-FaceRecognition_electrodes.tsv',\n '/sub-010/eeg/sub-010_task-FaceRecognition_channels.tsv',\n '/sub-010/anat/sub-010_mod-T1w_defacemask.nii.gz',\n '/sub-014/eeg/sub-014_task-FaceRecognition_events.json',\n '/sub-014/eeg/sub-014_task-FaceRecognition_eeg.set',\n '/sub-014/eeg/sub-014_task-FaceRecognition_coordsystem.json',\n '/sub-014/eeg/sub-014_task-FaceRecognition_events.tsv',\n '/sub-014/eeg/sub-014_task-FaceRecognition_electrodes.tsv',\n '/sub-014/eeg/sub-014_task-FaceRecognition_eeg.json',\n '/sub-014/eeg/sub-014_task-FaceRecognition_channels.tsv',\n '/sub-014/anat/sub-014_mod-T1w_defacemask.nii.gz',\n '/sub-006/eeg/sub-006_task-FaceRecognition_eeg.json',\n '/sub-006/eeg/sub-006_task-FaceRecognition_coordsystem.json',\n '/sub-006/eeg/sub-006_task-FaceRecognition_electrodes.tsv',\n '/sub-006/eeg/sub-006_task-FaceRecognition_eeg.set',\n '/sub-006/eeg/sub-006_task-FaceRecognition_channels.tsv',\n '/sub-006/eeg/sub-006_task-FaceRecognition_events.tsv',\n '/sub-006/eeg/sub-006_task-FaceRecognition_events.json',\n '/sub-006/anat/sub-006_mod-T1w_defacemask.nii.gz',\n '/sub-009/eeg/sub-009_task-FaceRecognition_channels.tsv',\n '/sub-009/eeg/sub-009_task-FaceRecognition_coordsystem.json',\n '/sub-009/eeg/sub-009_task-FaceRecognition_eeg.json',\n '/sub-009/eeg/sub-009_task-FaceRecognition_events.json',\n '/sub-009/eeg/sub-009_task-FaceRecognition_events.tsv',\n '/sub-009/eeg/sub-009_task-FaceRecognition_eeg.set',\n '/sub-009/eeg/sub-009_task-FaceRecognition_electrodes.tsv',\n '/sub-009/anat/sub-009_mod-T1w_defacemask.nii.gz',\n '/dataset_description.json',\n '/sub-002/eeg/sub-002_task-FaceRecognition_coordsystem.json',\n '/sub-002/eeg/sub-002_task-FaceRecognition_events.json',\n '/sub-002/eeg/sub-002_task-FaceRecognition_electrodes.tsv',\n '/sub-002/eeg/sub-002_task-FaceRecognition_eeg.json',\n '/sub-002/eeg/sub-002_task-FaceRecognition_eeg.set',\n '/sub-002/eeg/sub-002_task-FaceRecognition_events.tsv',\n '/sub-002/eeg/sub-002_task-FaceRecognition_channels.tsv',\n 
'/sub-002/anat/sub-002_mod-T1w_defacemask.nii.gz',\n '/README',\n '/sub-003/eeg/sub-003_task-FaceRecognition_eeg.set',\n '/sub-003/eeg/sub-003_task-FaceRecognition_events.tsv',\n '/sub-003/eeg/sub-003_task-FaceRecognition_electrodes.tsv',\n '/sub-003/eeg/sub-003_task-FaceRecognition_channels.tsv',\n '/sub-003/eeg/sub-003_task-FaceRecognition_eeg.json',\n '/sub-003/eeg/sub-003_task-FaceRecognition_events.json',\n '/sub-003/eeg/sub-003_task-FaceRecognition_coordsystem.json',\n '/sub-003/anat/sub-003_mod-T1w_defacemask.nii.gz',\n '/sub-011/eeg/sub-011_task-FaceRecognition_electrodes.tsv',\n '/sub-011/eeg/sub-011_task-FaceRecognition_eeg.set',\n '/sub-011/eeg/sub-011_task-FaceRecognition_events.json',\n '/sub-011/eeg/sub-011_task-FaceRecognition_eeg.json',\n '/sub-011/eeg/sub-011_task-FaceRecognition_channels.tsv',\n '/sub-011/eeg/sub-011_task-FaceRecognition_events.tsv',\n '/sub-011/eeg/sub-011_task-FaceRecognition_coordsystem.json',\n '/sub-011/anat/sub-011_mod-T1w_defacemask.nii.gz',\n '/sub-019/eeg/sub-019_task-FaceRecognition_channels.tsv',\n '/sub-019/eeg/sub-019_task-FaceRecognition_eeg.json',\n '/sub-019/eeg/sub-019_task-FaceRecognition_eeg.set',\n '/sub-019/eeg/sub-019_task-FaceRecognition_events.json',\n '/sub-019/eeg/sub-019_task-FaceRecognition_coordsystem.json',\n '/sub-019/eeg/sub-019_task-FaceRecognition_events.tsv',\n '/sub-019/eeg/sub-019_task-FaceRecognition_electrodes.tsv',\n '/sub-019/anat/sub-019_mod-T1w_defacemask.nii.gz',\n '/sub-004/eeg/sub-004_task-FaceRecognition_events.json',\n '/sub-004/eeg/sub-004_task-FaceRecognition_coordsystem.json',\n '/sub-004/eeg/sub-004_task-FaceRecognition_events.tsv',\n '/sub-004/eeg/sub-004_task-FaceRecognition_electrodes.tsv',\n '/sub-004/eeg/sub-004_task-FaceRecognition_eeg.set',\n '/sub-004/eeg/sub-004_task-FaceRecognition_eeg.json',\n '/sub-004/eeg/sub-004_task-FaceRecognition_channels.tsv',\n '/sub-004/anat/sub-004_mod-T1w_defacemask.nii.gz',\n '/sub-017/eeg/sub-017_task-FaceRecognition_channels.tsv',\n '/sub-017/eeg/sub-017_task-FaceRecognition_eeg.json',\n '/sub-017/eeg/sub-017_task-FaceRecognition_events.tsv',\n '/sub-017/eeg/sub-017_task-FaceRecognition_coordsystem.json',\n '/sub-017/eeg/sub-017_task-FaceRecognition_electrodes.tsv',\n '/sub-017/eeg/sub-017_task-FaceRecognition_events.json',\n '/sub-017/eeg/sub-017_task-FaceRecognition_eeg.set',\n '/sub-017/anat/sub-017_mod-T1w_defacemask.nii.gz',\n]\n\n// iEEG\nexport const ds003400 = [\n '/sub-RESP0059/sub-RESP0059_scans.json',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_channels.tsv',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_ieeg.vmrk',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_ieeg.eeg',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_coordsystem.json',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_events.json',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_electrodes.json',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_electrodes.tsv',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_events.tsv',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_ieeg.vhdr',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_photo.jpg',\n '/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_ieeg.json',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_ieeg.vmrk',\n 
'/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_channels.tsv',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_electrodes.json',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_coordsystem.json',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_photo.jpg',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_ieeg.vhdr',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_electrodes.tsv',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_ieeg.json',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_events.json',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_ieeg.eeg',\n '/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_events.tsv',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_ieeg.json',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_photo.jpg',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_channels.tsv',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_events.json',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_ieeg.eeg',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_electrodes.tsv',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_ieeg.vhdr',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_coordsystem.json',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_electrodes.json',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_ieeg.vmrk',\n '/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_events.tsv',\n '/sub-RESP0059/sub-RESP0059_scans.tsv',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_channels.tsv',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_electrodes.json',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_events.json',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_ieeg.eeg',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_coordsystem.json',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_electrodes.tsv',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_ieeg.vhdr',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_ieeg.json',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_ieeg.vmrk',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_photo.jpg',\n '/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_events.tsv',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_ieeg.json',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_ieeg.eeg',\n 
'/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_events.json',\n '/.gitattributes',\n '/CHANGES',\n '/participants.json',\n '/participants.tsv',\n '/sub-RESP0280/sub-RESP0280_scans.json',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_ieeg.json',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_photo.jpg',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_events.json',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_channels.tsv',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_electrodes.tsv',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_electrodes.json',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_coordsystem.json',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_events.tsv',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_ieeg.vhdr',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_ieeg.eeg',\n '/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_ieeg.vmrk',\n '/sub-RESP0280/sub-RESP0280_scans.tsv',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_events.json',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_electrodes.json',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_electrodes.tsv',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_ieeg.vhdr',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_ieeg.vmrk',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_coordsystem.json',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_photo.jpg',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_ieeg.eeg',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_channels.tsv',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_events.tsv',\n '/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_ieeg.json',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_events.json',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_ieeg.json',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_channels.tsv',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_ieeg.vhdr',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_ieeg.vmrk',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_photo.jpg',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_events.tsv',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_electrodes.tsv',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_coordsystem.json',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_ieeg.eeg',\n '/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_electrodes.json',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_ieeg.json',\n 
'/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_events.json',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_events.json',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_ieeg.vmrk',\n '/sub-RESP0465/sub-RESP0465_scans.tsv',\n '/sub-RESP0465/sub-RESP0465_scans.json',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_events.json',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_events.json',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_ieeg.vmrk',\n 
'/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_events.json',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_events.json',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_electrodes.json',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_photo.jpg',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_electrodes.json',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_events.json',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_electrodes.tsv',\n 
'/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_electrodes.json',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_photo.jpg',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_events.json',\n '/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_events.json',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_events.json',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_photo.jpg',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_electrodes.json',\n '/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_events.json',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_ieeg.eeg',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_ieeg.json',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0301/sub-RESP0301_scans.json',\n 
'/sub-RESP0301/sub-RESP0301_scans.tsv',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_electrodes.tsv',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_events.json',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_coordsystem.json',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_ieeg.vmrk',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_ieeg.vhdr',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_photo.jpg',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_events.tsv',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_ieeg.json',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_ieeg.eeg',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_electrodes.json',\n '/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_channels.tsv',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_electrodes.tsv',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_electrodes.json',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_channels.tsv',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_photo.jpg',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_events.tsv',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_events.json',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_ieeg.vhdr',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_ieeg.json',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_ieeg.eeg',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_ieeg.vmrk',\n '/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_coordsystem.json',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_ieeg.vmrk',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_coordsystem.json',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_photo.jpg',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_events.json',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_channels.tsv',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_ieeg.json',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_ieeg.vhdr',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_electrodes.tsv',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_ieeg.eeg',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_electrodes.json',\n '/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_events.tsv',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_electrodes.tsv',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_channels.tsv',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_events.tsv',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_events.json',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_coordsystem.json',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_ieeg.json',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_ieeg.vmrk',\n 
'/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_ieeg.vhdr',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_electrodes.json',\n '/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_ieeg.eeg',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_ieeg.eeg',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_events.json',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_ieeg.json',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/.datalad/.gitattributes',\n '/.datalad/config',\n '/dataset_description.json',\n '/README',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_photo.jpg',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_electrodes.json',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_events.json',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_photo.jpg',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_events.json',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_electrodes.json',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_electrodes.json',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_coordsystem.json',\n 
'/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_photo.jpg',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_events.json',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_events.tsv',\n '/sub-RESP0384/sub-RESP0384_scans.json',\n '/sub-RESP0384/sub-RESP0384_scans.tsv',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_events.json',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_electrodes.json',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_photo.jpg',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_electrodes.json',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_photo.jpg',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_events.json',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_events.json',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_electrodes.json',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_photo.jpg',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_coordsystem.json',\n 
'/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_electrodes.json',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_events.json',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_photo.jpg',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_coordsystem.json',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_electrodes.json',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_events.json',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_photo.jpg',\n '/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_ieeg.json',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_events.json',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_ieeg.eeg',\n '/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0356/sub-RESP0356_scans.json',\n '/sub-RESP0356/sub-RESP0356_scans.tsv',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_ieeg.json',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_ieeg.vhdr',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_ieeg.vmrk',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_events.tsv',\n 
'/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_coordsystem.json',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_electrodes.json',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_ieeg.eeg',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_events.json',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_photo.jpg',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_electrodes.tsv',\n '/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_channels.tsv',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_ieeg.vmrk',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_events.json',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_channels.tsv',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_electrodes.json',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_ieeg.vhdr',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_ieeg.eeg',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_ieeg.json',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_electrodes.tsv',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_coordsystem.json',\n '/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_events.tsv',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_electrodes.tsv',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_photo.jpg',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_ieeg.vhdr',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_channels.tsv',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_events.tsv',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_electrodes.json',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_events.json',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_coordsystem.json',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_ieeg.json',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_ieeg.vmrk',\n '/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_ieeg.eeg',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_ieeg.json',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_electrodes.tsv',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_electrodes.json',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_ieeg.vhdr',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_ieeg.vmrk',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_coordsystem.json',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_photo.jpg',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_events.json',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_channels.tsv',\n '/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_ieeg.eeg',\n 
'/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_events.tsv',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_channels.tsv',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_photo.jpg',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_coordsystem.json',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_ieeg.eeg',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_ieeg.vmrk',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_events.tsv',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_events.json',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_electrodes.json',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_ieeg.vhdr',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_electrodes.tsv',\n '/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_ieeg.json',\n '/sourcedata/sub-RESP0059/ses-SITUATION3A/ieeg/sub-RESP0059_ses-SITUATION3A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0059/ses-SITUATION4A/ieeg/sub-RESP0059_ses-SITUATION4A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0059/ses-SITUATION2A/ieeg/sub-RESP0059_ses-SITUATION2A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0059/ses-SITUATION1B/ieeg/sub-RESP0059_ses-SITUATION1B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0059/ses-SITUATION1A/ieeg/sub-RESP0059_ses-SITUATION1A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0280/ses-SITUATION3A/ieeg/sub-RESP0280_ses-SITUATION3A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0280/ses-SITUATION2A/ieeg/sub-RESP0280_ses-SITUATION2A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0280/ses-SITUATION1B/ieeg/sub-RESP0280_ses-SITUATION1B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0280/ses-SITUATION1A/ieeg/sub-RESP0280_ses-SITUATION1A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3G/ieeg/sub-RESP0465_ses-SITUATION3G_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3C/ieeg/sub-RESP0465_ses-SITUATION3C_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3E/ieeg/sub-RESP0465_ses-SITUATION3E_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3A/ieeg/sub-RESP0465_ses-SITUATION3A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3F/ieeg/sub-RESP0465_ses-SITUATION3F_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION2A/ieeg/sub-RESP0465_ses-SITUATION2A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION2B/ieeg/sub-RESP0465_ses-SITUATION2B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3D/ieeg/sub-RESP0465_ses-SITUATION3D_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION3B/ieeg/sub-RESP0465_ses-SITUATION3B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0465/ses-SITUATION1A/ieeg/sub-RESP0465_ses-SITUATION1A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0301/ses-SITUATION2A/ieeg/sub-RESP0301_ses-SITUATION2A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0301/ses-SITUATION2B/ieeg/sub-RESP0301_ses-SITUATION2B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0301/ses-SITUATION2C/ieeg/sub-RESP0301_ses-SITUATION2C_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0301/ses-SITUATION1B/ieeg/sub-RESP0301_ses-SITUATION1B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0301/ses-SITUATION1A/ieeg/sub-RESP0301_ses-SITUATION1A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION2D/ieeg/sub-RESP0384_ses-SITUATION2D_task-acute_ieeg.TRC',\n 
'/sourcedata/sub-RESP0384/ses-SITUATION1D/ieeg/sub-RESP0384_ses-SITUATION1D_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION3A/ieeg/sub-RESP0384_ses-SITUATION3A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION2A/ieeg/sub-RESP0384_ses-SITUATION2A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION2B/ieeg/sub-RESP0384_ses-SITUATION2B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION1C/ieeg/sub-RESP0384_ses-SITUATION1C_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION2C/ieeg/sub-RESP0384_ses-SITUATION2C_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION1B/ieeg/sub-RESP0384_ses-SITUATION1B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0384/ses-SITUATION1A/ieeg/sub-RESP0384_ses-SITUATION1A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0356/ses-SITUATION3A/ieeg/sub-RESP0356_ses-SITUATION3A_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0356/ses-SITUATION2APART2/ieeg/sub-RESP0356_ses-SITUATION2APART2_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0356/ses-SITUATION2APART1/ieeg/sub-RESP0356_ses-SITUATION2APART1_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0356/ses-SITUATION1B/ieeg/sub-RESP0356_ses-SITUATION1B_task-acute_ieeg.TRC',\n '/sourcedata/sub-RESP0356/ses-SITUATION1A/ieeg/sub-RESP0356_ses-SITUATION1A_task-acute_ieeg.TRC',\n]\n" }, { "alpha_fraction": 0.6358974575996399, "alphanum_fraction": 0.6376068592071533, "avg_line_length": 24.434782028198242, "blob_id": "9e30b77ce0e882b7534cb59e1d1bc4c9620d7eb9", "content_id": "ecea9af59555f7626aeab6c9e602c5b7c5841df6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 585, "license_type": "permissive", "max_line_length": 55, "num_lines": 23, "path": "/bids-validator/utils/files/collectDirectorySize.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import isNode from '../isNode'\nimport getFileStats from './getFileStats'\n\nconst collectDirectorySize = (fileList) => {\n let size = 0\n const keys = Object.keys(fileList)\n keys.forEach((key) => {\n const file = fileList[key]\n // collect file stats\n if (file.size) {\n // from File api in browser\n size += file.size\n // or from git-annex metadata when in gitTreeMode\n if (isNode) file.stats = { size: file.size }\n } else {\n file.stats = getFileStats(file)\n size += file.stats.size\n }\n })\n return size\n}\n\nexport default collectDirectorySize\n" }, { "alpha_fraction": 0.7247074842453003, "alphanum_fraction": 0.7260839939117432, "avg_line_length": 25.418182373046875, "blob_id": "e0b88946b44d9785f12a81c0fb41e22c94eace13", "content_id": "4446089b09dafcd68532a30bf98f52ac6e3044c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1453, "license_type": "permissive", "max_line_length": 65, "num_lines": 55, "path": "/bids-validator/utils/summary/collectSummary.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import files from '../files'\nimport checkForDerivatives from './checkForDerivatives'\nimport collectDataTypes from './collectDataTypes'\nimport collectModalities from './collectModalities'\nimport collectSessions from './collectSessions'\nimport collectSubjects from './collectSubjects'\n\nconst collectSummary = (fileList, options) => {\n const summary = {\n sessions: [],\n subjects: [],\n subjectMetadata: {},\n tasks: [],\n modalities: [],\n secondaryModalities: [],\n totalFiles: -1,\n size: 0,\n dataProcessed: false,\n pet: 
null,\n }\n\n summary.dataProcessed = checkForDerivatives(fileList)\n\n // remove ignored files from list:\n Object.keys(fileList).forEach(function (key) {\n if (fileList[key].ignore) {\n delete fileList[key]\n }\n })\n\n summary.totalFiles = Object.keys(fileList).length\n\n const relativePaths = Object.keys(fileList).map(\n (file) => fileList[file].relativePath,\n )\n\n //collect file directory statistics\n summary.size = files.collectDirectorySize(fileList)\n\n // collect modalities for summary\n const { primary, secondary } = collectModalities(relativePaths)\n summary.modalities = primary\n summary.secondaryModalities = secondary\n summary.dataTypes = collectDataTypes(relativePaths)\n\n // collect subjects\n summary.subjects = collectSubjects(fileList, options)\n\n // collect sessions\n summary.sessions = collectSessions(fileList, options)\n\n return summary\n}\n\nexport default collectSummary\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.545918345451355, "avg_line_length": 23.5, "blob_id": "10b9b5e21a3a933321dda706b5c2093780c8d12e", "content_id": "7ab1254d20914c4b74e60a0c18e153d3e867c0ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 588, "license_type": "permissive", "max_line_length": 57, "num_lines": 24, "path": "/bids-validator/validators/tsv/checkAge89.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nexport const checkAge89 = function (rows, file, issues) {\n const header = rows[0]\n const ageIdColumn = header.indexOf('age')\n for (let a = 0; a < rows.length; a++) {\n const line = rows[a]\n const line_values = line\n const age = line_values[ageIdColumn]\n if (age >= 89) {\n issues.push(\n new Issue({\n file: file,\n evidence: line.join(','),\n line: a + 1,\n reason: 'age of participant is above 89 ',\n code: 56,\n }),\n )\n }\n }\n}\n\nexport default checkAge89\n" }, { "alpha_fraction": 0.7232360243797302, "alphanum_fraction": 0.7232360243797302, "avg_line_length": 17.065933227539062, "blob_id": "d2f780d3a4df1940e7b2135bbe44eb0f20b78448", "content_id": "6862cd527c5b36b5823b13c42899904e992e8c70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1644, "license_type": "permissive", "max_line_length": 74, "num_lines": 91, "path": "/bids-validator/src/types/schema.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Schema structure returned by loadSchema\n */\n\nexport interface Format {\n pattern: string\n}\n\nexport interface Entity {\n name: string\n type: string\n format: string\n}\n\nexport interface SchemaObjects {\n files: Record<string, unknown>\n formats: Record<string, Format>\n entities: Record<string, Entity>\n}\n\nexport interface SchemaRules {\n files: SchemaFiles\n modalities: Record<string, unknown>\n}\n\nexport interface SchemaFiles {\n common: Record<string, unknown>\n deriv: Record<string, unknown>\n raw: Record<string, unknown>\n}\n\nexport interface ExpressionTest {\n expression: string\n result: string\n}\n\nexport interface SchemaMeta {\n expression_tests: ExpressionTest[]\n}\n\nexport interface Schema {\n objects: SchemaObjects\n rules: SchemaRules\n schema_version: string\n meta: SchemaMeta\n}\n\nexport interface SchemaIssue {\n code: string\n message: string\n level?: string\n}\n\nexport type GenericSchema = { [key: string]: GenericRule | GenericSchema }\n\nexport interface 
GenericRule {\n selectors?: string[]\n checks?: string[]\n columns?: Record<string, string>\n additional_columns?: string\n initial_columns?: string[]\n fields: Record<string, SchemaFields>\n issue?: SchemaIssue\n extensions?: string[]\n suffixes?: string[]\n stem?: string\n path?: string\n datatypes?: string[]\n pattern?: string\n name?: string\n format?: string\n required?: string\n index_columns?: string[]\n}\n\nexport interface SchemaFields {\n level: string\n level_addendum?: string\n issue?: SchemaIssue\n}\n\ninterface SchemaType {\n type: string\n enum?: string[]\n}\n\ninterface AnyOf {\n anyOf: SchemaType[]\n}\n\nexport type SchemaTypeLike = AnyOf | SchemaType\n" }, { "alpha_fraction": 0.5911441445350647, "alphanum_fraction": 0.6137685775756836, "avg_line_length": 32.26881790161133, "blob_id": "6e579454ad54b71d918d13b1edbdfaaaa7b16809", "content_id": "19ac4b0f7cd53240d4338607fff6e15931431fa2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3094, "license_type": "permissive", "max_line_length": 154, "num_lines": 93, "path": "/bids-validator/tests/events.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport validateEvents from '../validators/events/validate'\n\ndescribe('Events', function () {\n const headers = [\n [\n {\n path: '/sub01/sub01_task-test_bold.nii.gz',\n relativePath: '/sub01/sub01_task-test_bold.nii.gz',\n },\n { dim: [4, 0, 0, 0, 10] },\n ],\n ]\n\n it('all files in the /stimuli folder should be included in an _events.tsv file', () => {\n // stimuli.events will have all of the\n // files included in the stim_file column of every _events.tsv file.\n // stimuli.directory will have all of the\n // files included in the /stimuli directory.\n const stimuli = {\n events: ['/stimuli/images/red-square.jpg'],\n directory: [{ relativePath: '/stimuli/images/blue-square.jpg' }],\n }\n const issues = validateEvents([], stimuli, [], {})\n assert.strictEqual(issues.length, 1)\n assert.strictEqual(issues[0].code, 77)\n })\n\n it('should not throw issues if all files in the /stimuli folder are included in an _events.tsv file', () => {\n const stimuli = {\n events: ['/stimuli/images/red-square.jpg'],\n directory: [{ relativePath: '/stimuli/images/red-square.jpg' }],\n }\n const issues = validateEvents([], stimuli, [], {})\n assert.deepStrictEqual(issues, [])\n })\n\n it('should throw an issue if the onset of the last event in _events.tsv is more than TR * number of volumes in corresponding nifti header', () => {\n const events = [\n {\n file: { path: '/sub01/sub01_task-test_events.tsv' },\n path: '/sub01/sub01_task-test_events.tsv',\n contents: '12\\tsomething\\tsomething\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_bold.json': {\n RepetitionTime: 1,\n },\n }\n\n const issues = validateEvents(events, [], headers, jsonDictionary)\n assert.strictEqual(issues.length, 1)\n assert.strictEqual(issues[0].code, 85)\n })\n\n it('should throw an issue if the onset of the last event in _events.tsv is less than .5 * TR * number of volumes in corresponding nifti header', () => {\n const events = [\n {\n file: { path: '/sub01/sub01_task-test_events.tsv' },\n path: '/sub01/sub01_task-test_events.tsv',\n contents: '2\\tsomething\\tsomething\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_bold.json': {\n RepetitionTime: 1,\n },\n }\n\n const issues = validateEvents(events, [], headers, jsonDictionary)\n 
assert.strictEqual(issues.length, 1)\n assert.strictEqual(issues[0].code, 86)\n })\n\n it('should not throw any issues if the onset of the last event in _events.tsv is a reasonable value', () => {\n const events = [\n {\n file: { path: '/sub01/sub01_task-test_events.tsv' },\n path: '/sub01/sub01_task-test_events.tsv',\n contents: '7\\tsomething\\tsomething\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_bold.json': {\n RepetitionTime: 1,\n },\n }\n\n const issues = validateEvents(events, [], headers, jsonDictionary)\n assert.deepStrictEqual(issues, [])\n })\n})\n" }, { "alpha_fraction": 0.7082657814025879, "alphanum_fraction": 0.7082657814025879, "avg_line_length": 21.851852416992188, "blob_id": "edc07adf175ce3806bd68020f2f2ae3d300d3afa", "content_id": "ef9fc7bc2c2aea5ce35abc99d88db5b2450d932e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 617, "license_type": "permissive", "max_line_length": 72, "num_lines": 27, "path": "/bids-validator/src/setup/requestPermissions.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const globalRead = { name: 'read' } as const\nconst globalEnv = { name: 'env' } as const\n\n/**\n * Request / query a PermissionDescriptor\n */\nasync function requestPermission(\n permission: Deno.PermissionDescriptor,\n): Promise<boolean> {\n const status = await Deno.permissions.request(permission)\n\n if (status.state === 'granted') {\n return true\n } else {\n return false\n }\n}\n\n/**\n * Request read permissions\n */\nexport const requestReadPermission = () => requestPermission(globalRead)\n\n/**\n * Request environment variable permissions\n */\nexport const requestEnvPermission = () => requestPermission(globalEnv)\n" }, { "alpha_fraction": 0.6125893592834473, "alphanum_fraction": 0.6249006986618042, "avg_line_length": 26.369565963745117, "blob_id": "a603f50448e6cc19fb29cd2ae43a64b5d558c708", "content_id": "48a5539c5a80c823cf379abf5d6c4ec34a86393e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5040, "license_type": "permissive", "max_line_length": 91, "num_lines": 184, "path": "/bids-validator/validators/microscopy/ometiff.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\n\n/**\n * ometiff\n *\n * Takes an ometiff file, its omedata as an object\n * and a callback as arguments. 
Callback\n * with any issues it finds while validating\n * against the BIDS specification.\n */\nexport default function ometiff(file, omeData, jsonContentsDict, callback) {\n let issues = []\n\n let mergedDictionary = getMergedDictionary(file, jsonContentsDict)\n\n let rootKey = Object.keys(omeData)[0]\n let namespace = ''\n if (rootKey.includes(':OME')) {\n namespace = rootKey.split(':OME')[0].concat(':')\n }\n\n // Check for consistency with optional OME-TIFF metadata if present for\n // Immersion, NumericalAperture and Magnification\n let optionalFieldsIssues = checkOptionalFields(\n file.relativePath,\n omeData,\n namespace,\n mergedDictionary,\n )\n\n // Check for consistency for PixelSize between JSON and OME-TIFF metadata\n let pixelSizeIssues = checkPixelSize(omeData, namespace, mergedDictionary)\n\n issues = issues.concat(optionalFieldsIssues).concat(pixelSizeIssues)\n\n callback(issues)\n}\n\nconst convertFactor = (omeUnit, jsonUnit) => {\n if (omeUnit === jsonUnit || (omeUnit === 'µm' && jsonUnit === 'um')) return 1\n\n if (jsonUnit === 'um') {\n if (omeUnit === 'mm') {\n return 1000\n } else if (omeUnit === 'nm') {\n return 0.001\n }\n } else if (jsonUnit === 'mm') {\n if (omeUnit === 'µm') {\n return 0.001\n } else if (omeUnit === 'nm') {\n return 0.000001\n }\n } else if (jsonUnit === 'nm') {\n if (omeUnit === 'mm') {\n return 1000000\n } else if (omeUnit === 'µm') {\n return 1000\n }\n }\n}\n\nconst getMergedDictionary = (file, jsonContentsDict) => {\n let possibleJsonPath = file.relativePath\n .replace('.tif', '')\n .replace('.ome', '.json')\n\n let potentialSidecars = utils.files.potentialLocations(possibleJsonPath)\n\n return utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n}\n\nconst checkOptionalFields = (omePath, omeData, namespace, jsonData) => {\n let issues = []\n\n let fields = {\n Immersion: 'Immersion',\n NumericalAperture: 'LensNA',\n Magnification: 'NominalMagnification',\n }\n\n if (\n omeData[`${namespace}OME`][`${namespace}Instrument`] &&\n omeData[`${namespace}OME`][`${namespace}Instrument`][0][\n `${namespace}Objective`\n ]\n ) {\n let objective =\n omeData[`${namespace}OME`][`${namespace}Instrument`][0][\n `${namespace}Objective`\n ][0]['$']\n for (let field in fields) {\n if (!fields.hasOwnProperty(field)) {\n continue\n }\n let property = fields[field]\n if (jsonData.hasOwnProperty(field) && objective[property]) {\n if (objective[property] != jsonData[field]) {\n issues.push(\n new Issue({\n file: {\n relativePath: omePath,\n path: omePath,\n },\n evidence: `JSON field '${field}' is inconsistent`,\n code: 224,\n }),\n )\n }\n }\n }\n }\n\n return issues\n}\n\nconst checkPixelSize = (omeData, namespace, jsonData) => {\n let issues = []\n let validUnits = ['um', 'µm', 'nm', 'mm']\n\n const PhysicalSizeX =\n omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeX']\n const physicalSizeXUnit =\n omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeXUnit']\n const PhysicalSizeY =\n omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeY']\n const physicalSizeYUnit =\n omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeYUnit']\n const PhysicalSizeZ =\n omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeZ']\n const physicalSizeZUnit =\n 
omeData[`${namespace}OME`][`${namespace}Image`][0][`${namespace}Pixels`][0][\n '$'\n ]['PhysicalSizeZUnit']\n\n // if no corresponding json file, skip the consistency check\n if (Object.keys(jsonData).length === 0) return []\n\n let unitsPendToCheck = [\n physicalSizeXUnit,\n physicalSizeYUnit,\n physicalSizeZUnit,\n ]\n\n unitsPendToCheck.forEach((unit) => {\n if (!validUnits.includes(unit)) {\n issues.push(new Issue({ code: 222 }))\n }\n })\n\n // if any physicalSizeUnit is not valid or no valid json file, skip the consistency check\n if (issues.length > 0) return issues\n\n let pixelSize = jsonData['PixelSize']\n let physicalSizeUnit = jsonData['PixelSizeUnits']\n\n let factorX = convertFactor(physicalSizeXUnit, physicalSizeUnit)\n let factorY = convertFactor(physicalSizeYUnit, physicalSizeUnit)\n let factorZ = convertFactor(physicalSizeZUnit, physicalSizeUnit)\n\n if (\n PhysicalSizeX * factorX !== pixelSize[0] ||\n PhysicalSizeY * factorY !== pixelSize[1] ||\n PhysicalSizeZ * factorZ !== pixelSize[2]\n ) {\n issues.push(new Issue({ code: 221 }))\n }\n\n return issues\n}\n" }, { "alpha_fraction": 0.6352785229682922, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 29.15999984741211, "blob_id": "5ca05d6b8f3cabda2c87f1edd2e105a4a2593ed6", "content_id": "3a50a929021d8d2281e24a5d430f07002e03d387", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 754, "license_type": "permissive", "max_line_length": 75, "num_lines": 25, "path": "/bids-validator/validators/bids/checkReadme.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import isNode from '../../utils/isNode'\n\nconst Issue = require('../../utils').issues.Issue\n\nconst checkReadme = (fileList) => {\n const issues = []\n const readmeFiles = Array.from(Object.values(fileList)).filter(\n (file) => file.relativePath && file.relativePath.startsWith('/README'),\n )\n\n readmeFiles.map((readmeFile) => {\n const size = !isNode ? 
readmeFile.size : readmeFile.stats.size\n const failsSizeRequirement = size <= 150\n if (failsSizeRequirement) {\n issues.push(new Issue({ code: 213, file: readmeFile }))\n }\n })\n if (readmeFiles.length > 1) {\n issues.push(new Issue({ code: 228 }))\n } else if (readmeFiles.length === 0) {\n issues.push(new Issue({ code: 101 }))\n }\n return issues\n}\nexport default checkReadme\n" }, { "alpha_fraction": 0.6170212626457214, "alphanum_fraction": 0.6170212626457214, "avg_line_length": 14.666666984558105, "blob_id": "5cb5e8c8764dd021a821968b88a110aff6ac4cdf", "content_id": "47f4f2cc072fefbabe41d38ec46729c4485f500a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 47, "license_type": "permissive", "max_line_length": 32, "num_lines": 3, "path": "/bids-validator/src/bids-validator.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { main } from './main.ts'\n\nawait main()\n" }, { "alpha_fraction": 0.6462395787239075, "alphanum_fraction": 0.6473537683486938, "avg_line_length": 33.519229888916016, "blob_id": "626dfd5c6ef0768ddc152049ac67bf51679b7af4", "content_id": "f897a229e6523a8bab2edd80890dbd69219444e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1795, "license_type": "permissive", "max_line_length": 79, "num_lines": 52, "path": "/bids-validator/src/validators/filenameValidate.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { FileTree } from '../types/filetree.ts'\nimport { GenericSchema } from '../types/schema.ts'\nimport { assertEquals } from '../deps/asserts.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport { missingLabel, atRoot, entityLabelCheck } from './filenameValidate.ts'\nimport { BIDSFileDeno } from '../files/deno.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { FileIgnoreRules } from '../files/ignore.ts'\nimport { loadSchema } from '../setup/loadSchema.ts'\n\nconst schema = (await loadSchema()) as unknown as GenericSchema\nconst fileTree = new FileTree('/tmp', '/')\nconst issues = new DatasetIssues()\nconst ignore = new FileIgnoreRules([])\n\nDeno.test('test missingLabel', async (t) => {\n await t.step('File with underscore and no hyphens errors out.', async () => {\n const fileName = Deno.makeTempFileSync({\n prefix: 'no_labels_',\n suffix: '_entities.wav',\n }).split('/')[2]\n let file = new BIDSFileDeno('/tmp', fileName, ignore)\n\n let context = new BIDSContext(fileTree, file, issues)\n await missingLabel(schema, context)\n assertEquals(\n context.issues\n .getFileIssueKeys(context.file.path)\n .includes('ENTITY_WITH_NO_LABEL'),\n true,\n )\n })\n\n await t.step(\n \"File with underscores and hyphens doesn't error out.\",\n async () => {\n const fileName = Deno.makeTempFileSync({\n prefix: 'we-do_have-',\n suffix: '_entities.wav',\n }).split('/')[2]\n let file = new BIDSFileDeno('/tmp', fileName, ignore)\n let context = new BIDSContext(fileTree, file, issues)\n await missingLabel(schema, context)\n assertEquals(\n context.issues\n .getFileIssueKeys(context.file.path)\n .includes('ENTITY_WITH_NO_LABEL'),\n false,\n )\n },\n )\n})\n" }, { "alpha_fraction": 0.5428413152694702, "alphanum_fraction": 0.5551609396934509, "avg_line_length": 29.336700439453125, "blob_id": "20144ee863dda6efde834a75574f0ee7391d5df3", "content_id": "3684c49857d12b14c75a0c6146bf9cddf9b58384", "detected_licenses": [ "MIT" ], "is_generated": 
false, "is_vendor": false, "language": "JavaScript", "length_bytes": 9010, "license_type": "permissive", "max_line_length": 80, "num_lines": 297, "path": "/bids-validator/validators/headerFields.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../utils'\nvar Issue = utils.issues.Issue\nimport isNode from '../utils/isNode'\n\n/**\n * dimensions and resolution\n *\n * Checks dimensions and resolution for x, y, z, and time across subjects to\n * ensure they are consistent.\n *\n * The fields we are interested in are all arrays and we are only looking at\n * the first for values in those arrays. To handle single values or longer\n * arrays more arguments will need to be added to headerField.\n */\n\nconst headerFields = (headers) => {\n var finalIssues = []\n var allIssues39Dict = {}\n var fields = ['dim', 'pixdim']\n\n /* turn a list of dicts into a dict of lists */\n for (var i = 0; i < fields.length; i++) {\n var field = fields[i]\n var issues = headerField(headers, field)\n issues.forEach((issue) => {\n if (issue.code == 39) {\n if (allIssues39Dict.hasOwnProperty(issue.file.relativePath)) {\n allIssues39Dict[issue.file.relativePath].push(issue)\n } else {\n allIssues39Dict[issue.file.relativePath] = [issue]\n }\n } else {\n finalIssues.push(issue)\n }\n })\n }\n\n finalIssues = finalIssues.concat(collect39Issues(allIssues39Dict))\n\n return finalIssues\n}\n\nconst collect39Issues = (allIssues39Dict) => {\n const finalIssues = []\n for (let file of Object.keys(allIssues39Dict)) {\n const firstIssue = allIssues39Dict[file][0]\n let evidence = ''\n for (var issue of allIssues39Dict[file]) {\n evidence = evidence + ' ' + issue.reason\n }\n firstIssue.reason = evidence\n finalIssues.push(firstIssue)\n }\n return finalIssues\n}\n\n/**\n * Key to headerField working is the fact that we take and array of values\n * from the nifti header and convert it to a string. This string is used to\n * compare the header field value against other header field values and is used\n * as an attribute in the object nifti_types. Nifti types refers to the\n * different types of nifti files we are comparing across subjects. Only the\n * dimensionality of similar anatomy/functional/dwi headers are being compared.\n */\n\nconst headerField = (headers, field) => {\n var nifti_types = {}\n var issues = []\n for (var header_index = 0; header_index < headers.length; header_index++) {\n var badField = false\n var field_value\n var file = headers[header_index][0]\n var filename\n var header = headers[header_index][1]\n var match\n var path = file.relativePath\n var subject\n\n if (field === 'dim') {\n if (\n typeof header[field] === 'undefined' ||\n header[field] === null ||\n header[field].length < header[field][0]\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 40,\n }),\n )\n continue\n } else if (\n file.name.indexOf('_bold') > -1 &&\n (header[field][0] !== 4 || header[field].length !== 5)\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 54,\n evidence: 'header field \"dim\" = ' + header[field],\n }),\n )\n continue\n } else if (\n (file.name.indexOf('magnitude1') > -1 ||\n file.name.indexOf('magnitude2') > -1) &&\n header[field].length !== 4\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 94,\n evidence: 'this magnitude file has more than three dimensions. 
',\n }),\n )\n continue\n } else if (file.name.indexOf('T1w') > -1 && header[field].length !== 4) {\n issues.push(\n new Issue({\n file: file,\n code: 95,\n evidence: 'this T1w file does not have exactly three dimensions. ',\n }),\n )\n }\n field_value = header[field].slice(1, header[field][0] + 1).toString()\n } else if (field === 'pixdim') {\n if (\n typeof header['xyzt_units'] === 'undefined' ||\n header['xyzt_units'] === null ||\n header['xyzt_units'].length < 4\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 41,\n }),\n )\n badField = true\n }\n if (\n typeof header['pixdim'] === 'undefined' ||\n header['pixdim'] === null ||\n header['pixdim'].length < 4\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 42,\n }),\n )\n badField = true\n }\n if (header['qform_code'] === 0 && header['sform_code'] === 0) {\n issues.push(\n new Issue({\n file: file,\n code: 60,\n }),\n )\n badField = true\n }\n if (badField === true) {\n continue\n }\n field_value = []\n var pix_dim = header[field].slice(1, 5)\n var units = header['xyzt_units'].slice(0, 4)\n for (var i = 0; i < pix_dim.length; i++) {\n field_value.push('' + pix_dim[i].toFixed(2) + units[i])\n }\n field_value = field_value.toString()\n } else {\n console.warn(\n 'Checks against header field: ' + field + ' are currently unsupported.',\n )\n return\n }\n\n if (!file || (!isNode && !file.webkitRelativePath)) {\n continue\n }\n\n //match the subject identifier up to the '/' in the full path to a file.\n match = path.match(/sub-(.*?)(?=\\/)/)\n if (match === null) {\n continue\n } else {\n subject = match[0]\n }\n // files are prepended with subject name, the following two commands\n // remove the subject from the file name to allow filenames to be more\n // easily compared\n filename = path.substring(path.match(subject).index + subject.length)\n filename = filename.replace(subject, '<sub>')\n\n // generalize the run number so we can compare counts across all runs\n match = filename.match(/run-\\d+/)\n if (match !== null) {\n filename = filename.replace(match[0], '<run>')\n }\n\n if (!nifti_types.hasOwnProperty(filename)) {\n nifti_types[filename] = {}\n nifti_types[filename][field_value] = { count: 1, files: [file] }\n } else {\n if (!nifti_types[filename].hasOwnProperty(field_value)) {\n nifti_types[filename][field_value] = { count: 1, files: [file] }\n } else {\n nifti_types[filename][field_value].count += 1\n nifti_types[filename][field_value].files.push(file)\n }\n }\n }\n for (let nifti_key of Object.keys(nifti_types)) {\n const nifti_type = nifti_types[nifti_key]\n let max_field_value = Object.keys(nifti_type)[0]\n for (let field_value_key in nifti_type) {\n if (nifti_type.hasOwnProperty(field_value_key)) {\n field_value = nifti_type[field_value_key]\n if (field_value.count > nifti_type[max_field_value].count) {\n max_field_value = field_value_key\n }\n }\n }\n for (let field_value_key of Object.keys(nifti_type)) {\n field_value = nifti_type[field_value_key]\n if (\n max_field_value !== field_value_key &&\n headerFieldCompare(max_field_value, field_value_key)\n ) {\n for (\n var nifti_file_index = 0;\n nifti_file_index < field_value.files.length;\n nifti_file_index++\n ) {\n var nifti_file = field_value.files[nifti_file_index]\n var evidence\n if (field === 'dim') {\n evidence =\n 'The most common set of dimensions is: ' +\n max_field_value +\n ' (voxels), This file has the dimensions: ' +\n field_value_key +\n ' (voxels).'\n } else if (field === 'pixdim') {\n evidence =\n 'The most common resolution is: ' +\n 
max_field_value.replace(/,/g, ' x ') +\n ', This file has the resolution: ' +\n field_value_key.replace(/,/g, ' x ') +\n '.'\n }\n issues.push(\n new Issue({\n file: nifti_file,\n reason: evidence,\n code: 39,\n }),\n )\n }\n }\n }\n }\n return issues\n}\n\n/**\n * if elements of the two arrays differ by less than one we won't raise a\n * warning about them. There are a large number of floating point rounding\n * errors that cause resolutions to be slightly different. Returns true if\n * the two headers are significantly different\n */\nconst headerFieldCompare = (header1, header2) => {\n var hdr1 = header1.split(',')\n var hdr2 = header2.split(',')\n if (hdr1.length !== hdr2.length) {\n return true\n }\n for (var i = 0; i < hdr1.length; i++) {\n var hdr1_val = Number(hdr1[i].match(/-?\\d*\\.?\\d*/))\n var hdr2_val = Number(hdr2[i].match(/-?\\d*\\.?\\d*/))\n // Matching alphas with * will return '' on headers without units\n var hdr1_unit = hdr1[i].match(/[A-Za-z]*$/)[0]\n var hdr2_unit = hdr2[i].match(/[A-Za-z]*$/)[0]\n if (Math.abs(hdr1_val - hdr2_val) > 0.00001) {\n return true\n }\n if (hdr1_unit !== hdr2_unit) {\n return true\n }\n }\n return false\n}\n\nexport default headerFields\nexport { collect39Issues }\n" }, { "alpha_fraction": 0.5742838382720947, "alphanum_fraction": 0.5936042666435242, "avg_line_length": 33.906978607177734, "blob_id": "8a213a6a21a4ab5359398df7429291e169f0b978", "content_id": "23f357b6ba0d2a0158f502b730c40db0ff55e9c8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1501, "license_type": "permissive", "max_line_length": 72, "num_lines": 43, "path": "/bids-validator/validators/__tests__/checkAnyDataPresent.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport { getFolderSubjects } from '../checkAnyDataPresent.js'\n\ndescribe('checkAnyDataPresent', () => {\n describe('getFolderSubjects()', () => {\n it('returns only unique subjects', () => {\n // Pseudo-FileList object but an array simulates it\n const fileList = [\n { relativePath: 'sub-01/files' },\n { relativePath: 'sub-01/another' },\n { relativePath: 'sub-02/data' },\n ]\n const subjects = getFolderSubjects(fileList)\n assert.isArray(subjects)\n assert.deepEqual(subjects, ['01', '02'])\n })\n it('filters out emptyroom subject', () => {\n const fileList = [\n { relativePath: 'sub-01/files' },\n { relativePath: 'sub-emptyroom/data' },\n ]\n const subjects = getFolderSubjects(fileList)\n assert.isArray(subjects)\n assert.deepEqual(subjects, ['01'])\n })\n it('works for deeply nested files', () => {\n const fileList = [\n { relativePath: 'sub-01/files/a.nii.gz' },\n { relativePath: 'sub-01/another/b.nii.gz' },\n { relativePath: 'sub-02/data/test' },\n ]\n const subjects = getFolderSubjects(fileList)\n assert.isArray(subjects)\n assert.deepEqual(subjects, ['01', '02'])\n })\n it('works with object arguments', () => {\n const fileList = { 0: { relativePath: 'sub-01/anat/one.nii.gz' } }\n const subjects = getFolderSubjects(fileList)\n assert.isArray(subjects)\n assert.deepEqual(subjects, ['01'])\n })\n })\n})\n" }, { "alpha_fraction": 0.5150602459907532, "alphanum_fraction": 0.5647590160369873, "avg_line_length": 27.869565963745117, "blob_id": "9a7dba8f123720dcd840123c9983d6fc0f602bce", "content_id": "584c64553488b42a25021d0f0dcbc3745ec45ee4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1328, 
"license_type": "permissive", "max_line_length": 77, "num_lines": 46, "path": "/bids-validator/tests/bval.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport bval from '../validators/bval/bval'\n\ndescribe('bval', function () {\n it('should allow proper bval contents', function () {\n const val = '4 6 2 5 3 23 5'\n bval({}, val, function (issues) {\n assert.deepEqual(issues, [])\n })\n })\n\n it('should not allow more than one row', function () {\n const val = '0 4 3 6 1 6 2 4 1\\n 4 3 5 2 4 2 4 5'\n bval({}, val, function (issues) {\n assert(issues.length == 1 && issues[0].code == 30)\n })\n })\n\n it('should catch doublespace separators', function () {\n const val = '4 6 2 5 3 23 5'\n bval({}, val, function (issues) {\n assert(issues.length == 1 && issues[0].code == 47)\n })\n })\n\n it('should not allow undefined bvals', function () {\n const val = undefined\n bval({}, val, function (issues) {\n assert(issues.length == 1 && issues[0].code == 89)\n })\n })\n\n it('should not allow bvals of types other than string', function () {\n const val = [0, 1, 2, 3]\n bval({}, val, function (issues) {\n assert(issues.length == 1 && issues[0].code == 89)\n })\n })\n\n it('should not allow bvecs to be submitted in place of bval', function () {\n const val = '4 6 7\\n 2 3 4\\n 4 5 6'\n bval({}, val, function (issues) {\n assert(issues.length == 1 && issues[0].code == 30)\n })\n })\n})\n" }, { "alpha_fraction": 0.47993215918540955, "alphanum_fraction": 0.5528547167778015, "avg_line_length": 28.983051300048828, "blob_id": "e02b8f0c3e4aca062baa3f42d11780d004fabedd", "content_id": "9c81558980d79b82725270c2753d508d9d0924aa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1769, "license_type": "permissive", "max_line_length": 77, "num_lines": 59, "path": "/bids-validator/tests/bvec.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport bvec from '../validators/bvec/bvec'\n\ndescribe('bvec', function () {\n it('should allow valid bvec contents', function () {\n const vec = '4 6 2 5\\n3 2 3 5\\n6 4 3 5'\n bvec({}, vec, function (issues) {\n assert.deepEqual(issues, [])\n })\n })\n\n it('should not allow more or less than 3 rows', function () {\n let vec = '0 4 3 6 1 6 2 4\\n 4 3 5 2 4 2 4 5'\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 31)\n })\n\n vec =\n '0 4 3 6 1 6 2 4\\n 4 3 5 2 4 2 4 5\\n 4 3 5 2 4 2 4 5\\n 4 3 5 2 4 2 4 5'\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 31)\n })\n })\n\n it('should not allow rows of inconsistent length', function () {\n const vec = '0 4 3 6 1 6 4\\n 4 3 4 2 4 5\\n 4 3 5 2 4 2 4 5'\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 46)\n })\n })\n\n it('should catch doublespace separators', function () {\n const vec = '4 6 2 5\\n3 2 3 5\\n6 4 3 5'\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 47)\n })\n })\n\n it('should not allow undefined bvecs', function () {\n const vec = undefined\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 88)\n })\n })\n\n it('should not allow bvecs of types other than string', function () {\n const vec = [0, 1, 2, 3]\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 88)\n })\n })\n\n it('should not allow bvals to 
be submitted in place of bvec', function () {\n const vec = '4 6 7'\n bvec({}, vec, function (issues) {\n assert(issues.length == 1 && issues[0].code == 31)\n })\n })\n})\n" }, { "alpha_fraction": 0.6545745134353638, "alphanum_fraction": 0.655133843421936, "avg_line_length": 29.749385833740234, "blob_id": "1bc97cfcc3d580723bc30f50aaf4ecb25917268a", "content_id": "64cffc50c4d00ba9f67b60715b561bba51129423", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 12515, "license_type": "permissive", "max_line_length": 181, "num_lines": 407, "path": "/bids-validator/src/schema/applyRules.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import {\n GenericRule,\n GenericSchema,\n SchemaFields,\n SchemaTypeLike,\n} from '../types/schema.ts'\nimport { Severity } from '../types/issues.ts'\nimport { BIDSContext } from './context.ts'\nimport { expressionFunctions } from './expressionLanguage.ts'\nimport { logger } from '../utils/logger.ts'\nimport { memoize } from '../utils/memoize.ts'\n\n/**\n * Given a schema and context, evaluate which rules match and test them.\n * Recursively descend into schema object and iterate over each levels keys.\n * If we find a child of the object that isn't an Object ignore it, this will\n * be things that show up in meta and objects directories. If an an object\n * has a selectors key we know that this is an actual rule that we know how\n * to evaluate. Finally if what we have is an Object recurse on it to see if\n * its children have any rules.\n * @param schema\n * @param context\n */\nexport function applyRules(\n schema: GenericSchema,\n context: BIDSContext,\n rootSchema?: GenericSchema,\n schemaPath?: string,\n) {\n if (!rootSchema) {\n rootSchema = schema\n }\n if (!schemaPath) {\n schemaPath = 'schema'\n }\n Object.assign(context, expressionFunctions)\n // @ts-expect-error\n context.exists.bind(context)\n for (const key in schema) {\n if (!(schema[key].constructor === Object)) {\n continue\n }\n if ('selectors' in schema[key]) {\n evalRule(\n schema[key] as GenericRule,\n context,\n rootSchema,\n `${schemaPath}.${key}`,\n )\n } else if (schema[key].constructor === Object) {\n applyRules(\n schema[key] as GenericSchema,\n context,\n rootSchema,\n `${schemaPath}.${key}`,\n )\n }\n }\n return Promise.resolve()\n}\n\nconst evalConstructor = (src: string): Function =>\n new Function('context', `with (context) { return ${src} }`)\nconst safeHas = () => true\nconst safeGet = (target: any, prop: any) =>\n prop === Symbol.unscopables ? 
undefined : target[prop]\n\nconst memoizedEvalConstructor = memoize(evalConstructor)\n\nexport function evalCheck(src: string, context: BIDSContext) {\n const test = memoizedEvalConstructor(src)\n const safeContext = new Proxy(context, { has: safeHas, get: safeGet })\n try {\n return test(safeContext)\n } catch (error) {\n logger.debug(error)\n return false\n }\n}\n\n/**\n * Different keys in a rule have different interpretations.\n * We associate theys keys from a rule object to a function adds an\n * issue to the context if the rule evaluation fails.\n */\n// @ts-expect-error\nconst evalMap: Record<\n keyof GenericRule,\n (\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n ) => boolean | void\n> = {\n checks: evalRuleChecks,\n columns: evalColumns,\n additional_columns: evalAdditionalColumns,\n initial_columns: evalInitialColumns,\n index_columns: evalIndexColumns,\n fields: evalJsonCheck,\n}\n\n/**\n * Entrypoint for evaluating a individual rule.\n * We see if every selector applies to this context,\n * Then we attempt to interpret every other key in the rule\n * object.\n */\nfunction evalRule(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n) {\n if (rule.selectors && !mapEvalCheck(rule.selectors, context)) {\n return\n }\n Object.keys(rule)\n .filter((key) => key in evalMap)\n .map((key) => {\n // @ts-expect-error\n evalMap[key](rule, context, schema, schemaPath)\n })\n}\n\nfunction mapEvalCheck(statements: string[], context: BIDSContext): boolean {\n return statements.every((x) => evalCheck(x, context))\n}\n\n/**\n * Classic rules interpreted like selectors. Examples in specification:\n * schema/rules/checks/*\n */\nfunction evalRuleChecks(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): boolean {\n if (rule.checks && !mapEvalCheck(rule.checks, context)) {\n if (rule.issue?.code && rule.issue?.message) {\n context.issues.add({\n key: rule.issue.code,\n reason: rule.issue.message,\n files: [{ ...context.file, evidence: schemaPath }],\n severity: rule.issue.level as Severity,\n })\n } else {\n context.issues.addNonSchemaIssue('CHECK_ERROR', [\n { ...context.file, evidence: schemaPath },\n ])\n }\n }\n return true\n}\n\n/**\n * schema.formats contains named types with patterns. Many entries in\n * schema.objects have a format to constrain its possible values. Presently\n * this is written with tsv's in mind. The blanket n/a pass may be inappropriate\n * for other type checks. filenameValidate predates this but does similar type\n * checking for entities.\n */\nfunction schemaObjectTypeCheck(\n schemaObject: SchemaTypeLike,\n value: string,\n schema: GenericSchema,\n): boolean {\n // always allow n/a?\n if (value === 'n/a') {\n return true\n }\n if ('anyOf' in schemaObject) {\n return schemaObject.anyOf.some((x) =>\n schemaObjectTypeCheck(x, value, schema),\n )\n }\n if ('enum' in schemaObject && schemaObject.enum) {\n return schemaObject.enum.some((x) => x === value)\n }\n // @ts-expect-error\n const format = schema.objects.formats[schemaObject.type]\n const re = new RegExp(`^${format.pattern}$`)\n return re.test(value)\n}\n\n/**\n * Columns in schema rules are assertions about the requirement level of what\n * headers should be present in a tsv file. 
Examples in specification:\n * schema/rules/tabular_data/*\n */\nfunction evalColumns(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): void {\n if (!rule.columns || context.extension !== '.tsv') return\n const headers = [...context.columns.keys()]\n for (const [ruleHeader, requirement] of Object.entries(rule.columns)) {\n // @ts-expect-error\n const columnObject = schema.objects.columns[ruleHeader]\n const name = columnObject.name\n if (!headers.includes(name) && requirement === 'required') {\n context.issues.addNonSchemaIssue('TSV_COLUMN_MISSING', [\n {\n ...context.file,\n evidence: `Column with header ${name} listed as required. ${schemaPath}`,\n },\n ])\n }\n if (headers.includes(name)) {\n for (const value of context.columns[name]) {\n if (\n !schemaObjectTypeCheck(columnObject as SchemaTypeLike, value, schema)\n ) {\n context.issues.addNonSchemaIssue('TSV_VALUE_INCORRECT_TYPE', [\n {\n ...context.file,\n evidence: `'${value}' ${Deno.inspect(columnObject)}`,\n },\n ])\n break\n }\n }\n }\n }\n}\n\n/**\n * A small subset of tsv schema rules enforce a specific order of columns.\n * No error is currently provided by the rule itself.\n */\nfunction evalInitialColumns(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): void {\n if (!rule?.columns || !rule?.initial_columns || context.extension !== '.tsv')\n return\n const headers = [...context.columns.keys()]\n rule.initial_columns.map((ruleHeader: string, ruleIndex: number) => {\n // @ts-expect-error\n const ruleHeaderName = schema.objects.columns[ruleHeader].name\n const contextIndex = headers.findIndex((x) => x === ruleHeaderName)\n if (contextIndex === -1) {\n const evidence = `Column with header ${ruleHeaderName} not found, indexed from 0 it should appear in column ${ruleIndex}. ${schemaPath}`\n context.issues.addNonSchemaIssue('TSV_COLUMN_MISSING', [\n { ...context.file, evidence: evidence },\n ])\n } else if (ruleIndex !== contextIndex) {\n const evidence = `Column with header ${ruleHeaderName} found at index ${contextIndex} while rule specifies, indexed from 0, it should be in column ${ruleIndex}. 
${schemaPath}`\n context.issues.addNonSchemaIssue('TSV_COLUMN_ORDER_INCORRECT', [\n { ...context.file, evidence: evidence },\n ])\n }\n })\n}\n\nfunction evalAdditionalColumns(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): void {\n if (context.extension !== '.tsv') return\n const headers = Object.keys(context?.columns)\n // hard coding allowed here feels bad\n if (!(rule.additional_columns === 'allowed') && rule.columns) {\n const ruleHeadersNames = Object.keys(rule.columns).map(\n // @ts-expect-error\n (x) => schema.objects.columns[x].name,\n )\n let extraCols = headers.filter(\n (header) => !ruleHeadersNames.includes(header),\n )\n if (rule.additional_columns === 'allowed_if_defined') {\n extraCols = extraCols.filter((header) => !(header in context.sidecar))\n }\n if (extraCols.length) {\n context.issues.addNonSchemaIssue('TSV_ADDITIONAL_COLUMNS_NOT_ALLOWED', [\n { ...context.file, evidence: `Disallowed columns found ${extraCols}` },\n ])\n }\n }\n}\n\nfunction evalIndexColumns(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): void {\n if (\n !rule?.columns ||\n !rule?.index_columns ||\n !rule?.index_columns.length ||\n context.extension !== '.tsv'\n )\n return\n const headers = Object.keys(context?.columns)\n const uniqueIndexValues = new Set()\n const index_columns = rule.index_columns.map((col: string) => {\n // @ts-expect-error\n return schema.objects.columns[col].name\n })\n const missing = index_columns.filter((col: string) => !headers.includes(col))\n if (missing.length) {\n context.issues.addNonSchemaIssue('TSV_COLUMN_MISSING', [\n {\n ...context.file,\n evidence: `Columns cited as index columns not in file: ${missing}. ${schemaPath}`,\n },\n ])\n return\n }\n const rowCount = context.columns[index_columns[0]].length\n for (let i = 0; i < rowCount; i++) {\n let indexValue = ''\n index_columns.map((col: string) => {\n indexValue = indexValue.concat(context.columns[col][i])\n })\n if (uniqueIndexValues.has(indexValue)) {\n context.issues.addNonSchemaIssue('TSV_INDEX_VALUE_NOT_UNIQUE', [\n { ...context.file, evidence: `Row: ${i + 2}, Value: ${indexValue}` },\n ])\n } else {\n uniqueIndexValues.add(indexValue)\n }\n }\n}\n\n/**\n * For evaluating field requirements and values that should exist in a json\n * sidecar for a file. Will need to implement an additional check/error for\n * `prohibitied` fields. Examples in specification:\n * schema/rules/sidecars/*\n *\n */\nfunction evalJsonCheck(\n rule: GenericRule,\n context: BIDSContext,\n schema: GenericSchema,\n schemaPath: string,\n): void {\n for (const [key, requirement] of Object.entries(rule.fields)) {\n const severity = getFieldSeverity(requirement, context)\n // @ts-expect-error\n const keyName = schema.objects.metadata[key].name\n if (severity && severity !== 'ignore' && !(keyName in context.sidecar)) {\n if (requirement.issue?.code && requirement.issue?.message) {\n context.issues.add({\n key: requirement.issue.code,\n reason: requirement.issue.message,\n severity,\n files: [{ ...context.file }],\n })\n } else {\n context.issues.addNonSchemaIssue('JSON_KEY_REQUIRED', [\n {\n ...context.file,\n evidence: `missing ${keyName} as per ${schemaPath}`,\n },\n ])\n }\n }\n }\n}\n\n/**\n * JSON Field checks have conditions where their requirement levels can\n * change based on some other field. 
This function resolves the severity\n * of a JsonCheckFailure depending on how the checks level object is shaped.\n */\nfunction getFieldSeverity(\n requirement: string | SchemaFields,\n context: BIDSContext,\n): Severity {\n // Does this conversion hold for other parts of the schema or just json checks?\n const levelToSeverity: Record<string, Severity> = {\n recommended: 'ignore',\n required: 'error',\n optional: 'ignore',\n prohibited: 'ignore',\n }\n let severity: Severity = 'ignore'\n\n if (typeof requirement === 'string' && requirement in levelToSeverity) {\n severity = levelToSeverity[requirement]\n } else if (typeof requirement === 'object' && requirement.level) {\n severity = levelToSeverity[requirement.level]\n const addendumRegex = /(required|recommended) if \\`(\\w+)\\` is \\`(\\w+)\\`/\n if (requirement.level_addendum) {\n const match = addendumRegex.exec(requirement.level_addendum)\n if (match && match.length === 4) {\n const [_, addendumLevel, key, value] = match\n // @ts-expect-error\n if (key in context.sidecar && context.sidecar[key] === value) {\n severity = levelToSeverity[addendumLevel]\n }\n }\n }\n }\n return severity\n}\n" }, { "alpha_fraction": 0.5592105388641357, "alphanum_fraction": 0.5694901347160339, "avg_line_length": 31, "blob_id": "932447a5f61a7eb9224dff4078adf612aade5c7f", "content_id": "f8ef3627e3af117572c4b4a46c8b7688eab6190a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2432, "license_type": "permissive", "max_line_length": 78, "num_lines": 76, "path": "/bids-validator/src/issues/datasetIssues.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals, assertObjectMatch } from '../deps/asserts.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { IssueFile } from '../types/issues.ts'\nimport { DatasetIssues } from './datasetIssues.ts'\n\nDeno.test('DatasetIssues management class', async (t) => {\n await t.step('Constructor succeeds', () => {\n new DatasetIssues()\n })\n await t.step('add an Issue', () => {\n const issues = new DatasetIssues()\n issues.add({ key: 'TEST_ERROR', reason: 'Test issue' })\n assertEquals(issues.hasIssue({ key: 'TEST_ERROR' }), true)\n })\n await t.step('add Issue with several kinds of files', () => {\n // This mostly tests the issueFile mapping function\n const issues = new DatasetIssues()\n const testStream = new ReadableStream()\n const text = () => Promise.resolve('')\n const files = [\n {\n text,\n name: 'dataset_description.json',\n path: '/dataset_description.json',\n size: 500,\n ignored: false,\n stream: testStream,\n } as BIDSFile,\n {\n text,\n name: 'README',\n path: '/README',\n size: 500,\n ignored: false,\n stream: testStream,\n line: 1,\n character: 5,\n severity: 'warning',\n reason: 'Readme borked',\n } as IssueFile,\n ]\n issues.add({ key: 'TEST_FILES_ERROR', reason: 'Test issue', files })\n assertEquals(issues.getFileIssueKeys('/README'), ['TEST_FILES_ERROR'])\n for (const [key, issue] of issues) {\n assertObjectMatch(issue, { key: 'TEST_FILES_ERROR' })\n for (const f of issue.files.values()) {\n // Checking all files for the key assures they are in IssueFile format\n assertObjectMatch(f, {\n stream: Promise.resolve(testStream),\n })\n }\n }\n })\n await t.step(\n 'issues formatted matching the expected IssueOutput type',\n () => {\n const issues = new DatasetIssues()\n issues.add({ key: 'TEST_ERROR', reason: 'Test issue' })\n assertEquals(issues.hasIssue({ key: 'TEST_ERROR' }), 
true)\n assertEquals(issues.formatOutput(), {\n errors: [\n {\n additionalFileCount: 0,\n code: -9007199254740991,\n files: [],\n helpUrl: 'https://neurostars.org/search?q=TEST_ERROR',\n key: 'TEST_ERROR',\n reason: 'Test issue',\n severity: 'error',\n },\n ],\n warnings: [],\n })\n },\n )\n})\n" }, { "alpha_fraction": 0.5903459787368774, "alphanum_fraction": 0.5933361649513245, "avg_line_length": 25.011110305786133, "blob_id": "35f69182ea7d1ec03533ca1d657db6da509ec41b", "content_id": "65be0ed1dc608431702c263fb37849cb9796db0c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2341, "license_type": "permissive", "max_line_length": 73, "num_lines": 90, "path": "/bids-validator/cli.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/*eslint no-console: [\"error\", {allow: [\"log\"]}] */\n\nimport { parseOptions } from './validators/options'\nimport validate from './index.js'\n\nconst format = validate.consoleFormat\nimport colors from 'colors/safe'\nimport fs from 'fs'\nimport { filenamesOnly } from './utils/filenamesOnly.js'\n\nconst errorToString = (err) => {\n if (err instanceof Error) return err.stack\n else if (typeof err === 'object') return JSON.parse(err)\n else return err\n}\n\n/**\n * Write a large string or buffer to stdout and wait for the drain event\n *\n * This is needed to avoid truncating buffered output when piped\n * @param {string} data\n * @param {function} cb\n */\nconst writeStdout = (data, cb) => {\n if (!process.stdout.write(data)) {\n process.stdout.once('drain', cb)\n } else {\n process.nextTick(cb)\n }\n}\n\nexport function cli(argumentOverride) {\n return new Promise((resolve, reject) => {\n // Setup CLI state when called via Node.js\n if (process.env['NO_COLOR'] !== undefined) {\n colors.disable()\n }\n process.title = 'bids-validator'\n const argv = parseOptions(argumentOverride)\n const dir = argv._[0]\n const options = argv\n process.on('unhandledRejection', (err) => {\n console.log(\n format.unexpectedError(\n // eslint-disable-next-line\n `Unhandled rejection (\\n reason: ${errorToString(err)}\\n).\\n`,\n ),\n )\n reject(3)\n })\n\n if (options.filenames) {\n return filenamesOnly()\n }\n\n try {\n // Test if we can access the dataset directory at all\n fs.opendirSync(dir)\n } catch (err) {\n console.log(colors.red(dir + ' does not exist or is inaccessible'))\n reject(2)\n }\n\n validate.BIDS(dir, options, function (issues, summary) {\n function resolveOrReject() {\n if (\n issues === 'Invalid' ||\n (issues.errors && issues.errors.length >= 1) ||\n (issues.config && issues.config.length >= 1)\n ) {\n reject(1)\n } else {\n resolve(0)\n }\n }\n if (options.json) {\n writeStdout(JSON.stringify({ issues, summary }), resolveOrReject)\n } else {\n writeStdout(\n format.issues(issues, options) +\n '\\n' +\n format.summary(summary, options),\n resolveOrReject,\n )\n }\n })\n })\n}\n\nexport default cli\n" }, { "alpha_fraction": 0.5307742953300476, "alphanum_fraction": 0.5565850138664246, "avg_line_length": 22.984127044677734, "blob_id": "0de59ffb04468cb60cecc35a1419a081ba45c587", "content_id": "c9ee84ac7077f4e78101f1c0cec2a2a5edd0ed98", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1511, "license_type": "permissive", "max_line_length": 89, "num_lines": 63, "path": "/bids-validator/utils/array.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "var array = {\n /**\n 
* Equals\n *\n * Takes two arrays and returns true if they're\n * equal. Takes a third optional boolean argument\n * to sort arrays before checking equality.\n */\n equals: function (array1, array2, sort) {\n // if the other array is a falsy value, return\n if (!array1 || !array2) {\n return false\n }\n\n // compare lengths\n if (array1.length != array2.length) {\n return false\n }\n\n // optionally sort arrays\n if (sort) {\n array1.sort()\n array2.sort()\n }\n\n for (var i = 0, l = array1.length; i < l; i++) {\n // Check if we have nested arrays\n if (array1[i] instanceof Array && array2[i] instanceof Array) {\n // recurse into the nested arrays\n if (!array.equals(array1[i], array2[i], sort)) {\n return false\n }\n } else if (array1[i] != array2[i]) {\n // Warning - two different object instances will never be equal: {x:20} != {x:20}\n return false\n }\n }\n return true\n },\n\n /**\n * Takes to arrays and returns an array of two\n * arrays contains the differences contained\n * in each array.\n */\n diff: function (array1, array2) {\n var diff1 = [],\n diff2 = []\n for (var i = 0; i < array1.length; i++) {\n var elem1 = array1[i]\n var index = array2.indexOf(elem1)\n if (index > -1) {\n array2.splice(index, 1)\n } else {\n diff1.push(elem1)\n }\n }\n diff2 = array2\n return [diff1, diff2]\n },\n}\n\nexport default array\n" }, { "alpha_fraction": 0.637785792350769, "alphanum_fraction": 0.6486161351203918, "avg_line_length": 29.77777862548828, "blob_id": "29753481d809e19210bd4a4a84a27fe32f35a14c", "content_id": "909e876f219670a62b33a2a41f67df05e74f4c4f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 831, "license_type": "permissive", "max_line_length": 76, "num_lines": 27, "path": "/bids-validator/validators/nifti/phasediffWithoutMagnitude.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nconst phasediffWithoutMagnitude = (files) => {\n // check to see if each phasediff is associated with magnitude\n const issues = []\n const niftiNames = files.map((nifti) => nifti.name)\n const phaseDiffNiftis = niftiNames.filter(\n (nifti) => nifti.indexOf('phasediff') > -1,\n )\n const magnitude1Niftis = niftiNames.filter(\n (nifti) => nifti.indexOf('magnitude1') > -1,\n )\n phaseDiffNiftis.map((nifti) => {\n const associatedMagnitudeFile = nifti.replace('phasediff', 'magnitude1')\n if (magnitude1Niftis.indexOf(associatedMagnitudeFile) === -1) {\n issues.push(\n new Issue({\n code: 92,\n file: files.find((niftiFile) => niftiFile.name == nifti),\n }),\n )\n }\n })\n return issues\n}\n\nexport default phasediffWithoutMagnitude\n" }, { "alpha_fraction": 0.6232699155807495, "alphanum_fraction": 0.6275951266288757, "avg_line_length": 27.899999618530273, "blob_id": "14d1590e2b3dc9f5be7d65a84f84d7c8d6543972", "content_id": "14df7e3cbe120ce1ac7c17831657cf7727b1db93", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2312, "license_type": "permissive", "max_line_length": 79, "num_lines": 80, "path": "/bids-validator/src/tests/local/bids_examples.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// Deno runtime tests for tests/data/valid_dataset\nimport { assert, assertEquals } from '../../deps/asserts.ts'\nimport { Cell, Row, Table } from '../../deps/cliffy.ts'\nimport { colors } from '../../deps/fmt.ts'\nimport { IssueOutput } from 
'../../types/issues.ts'\nimport { validatePath, formatAssertIssue } from './common.ts'\nimport { parseOptions } from '../../setup/options.ts'\n\nconst options = await parseOptions(['fake_dataset_arg', ...Deno.args])\noptions.ignoreNiftiHeaders = true\n\n// Stand in for old validator config that could ignore issues\nfunction useIssue(issue: IssueOutput): boolean {\n return (\n 'schema.rules.checks.general.DuplicateFiles' !== issue.files[0].evidence &&\n issue.key !== 'EMPTY_FILE'\n )\n}\n\nlet header: string[] = ['issue key', 'filename', 'schema path']\nheader = header.map((x) => colors.magenta(x))\n\nconst errors: Row[] = []\nfunction formatBEIssue(issue: IssueOutput, dsPath: string) {\n errors.push(\n Row.from([\n colors.red(issue.key),\n issue.files[0].file.name,\n issue.files[0].evidence,\n ]),\n )\n}\n\nDeno.test('validate bids-examples', async (t) => {\n const prefix = 'tests/data/bids-examples'\n const dirEntries = Array.from(Deno.readDirSync(prefix))\n\n for (const dirEntry of dirEntries.sort((a, b) =>\n a.name.localeCompare(b.name),\n )) {\n if (!dirEntry.isDirectory || dirEntry.name.startsWith('.')) {\n continue\n }\n const path = `${prefix}/${dirEntry.name}`\n\n try {\n if (Deno.statSync(`${path}/.SKIP_VALIDATION`).isFile) {\n continue\n }\n } catch (e) {}\n const { tree, result } = await validatePath(t, path, options)\n const output = result.issues.formatOutput()\n output.errors = output.errors.filter((x) => useIssue(x))\n await t.step(`${path} has no issues`, () => {\n assertEquals(output.errors.length, 0)\n })\n if (output.errors.length === 0) {\n continue\n }\n\n errors.push(\n Row.from([\n new Cell(colors.cyan(dirEntry.name)).colSpan(4),\n undefined,\n undefined,\n undefined,\n ]).border(true),\n )\n output.errors.map((x) => formatBEIssue(x, dirEntry.name))\n }\n const table = new Table()\n .header(header)\n .body(errors)\n .border(false)\n .padding(1)\n .indent(2)\n .maxColWidth(40)\n .toString()\n console.log(table)\n})\n" }, { "alpha_fraction": 0.7200000286102295, "alphanum_fraction": 0.7221052646636963, "avg_line_length": 26.941177368164062, "blob_id": "e74012f166d69ae7be62530dc8dd161400663c0e", "content_id": "6e1d600128615ae78d5340c2f65aa8d45f68d8ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 475, "license_type": "permissive", "max_line_length": 58, "num_lines": 17, "path": "/bids-validator/utils/files/getBFileContent.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Get B-File Contents\n *\n * Takes an array of potential bval or bvec files\n * and a master b-file contents dictionary and returns\n * the contents of the desired file.\n */\nfunction getBFileContent(potentialBFiles, bContentsDict) {\n for (var i = 0; i < potentialBFiles.length; i++) {\n var potentialBFile = potentialBFiles[i]\n if (bContentsDict.hasOwnProperty(potentialBFile)) {\n return bContentsDict[potentialBFile]\n }\n }\n}\n\nexport default getBFileContent\n" }, { "alpha_fraction": 0.7213822603225708, "alphanum_fraction": 0.7213822603225708, "avg_line_length": 29.866666793823242, "blob_id": "ec0d78c3274a306e0245856d7000015fef1f1cfd", "content_id": "704a5b0c985d757f4287b2e282648ac5a0860edb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 463, "license_type": "permissive", "max_line_length": 112, "num_lines": 15, "path": "/bids-validator/src/types/check.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": 
"UTF-8", "text": "import { GenericSchema } from './schema.ts'\nimport { BIDSContext } from '../schema/context.ts'\n\n/** Function interface for writing a check */\nexport type CheckFunction = (\n schema: GenericSchema,\n context: BIDSContext,\n) => Promise<void>\n\n/** Function interface for a check of context against a specific rule as accessed by its path in the schema. */\nexport type RuleCheckFunction = (\n path: string,\n schema: GenericSchema,\n context: BIDSContext,\n) => void\n" }, { "alpha_fraction": 0.5805876851081848, "alphanum_fraction": 0.5805876851081848, "avg_line_length": 30.19444465637207, "blob_id": "c8c5e6d742a2dd05bc54251535c7fc64e5fc0c58", "content_id": "fb64a409752b9d36bdf69d16e51f4c0f819b6722", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1123, "license_type": "permissive", "max_line_length": 69, "num_lines": 36, "path": "/bids-validator/src/setup/loadSchema.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert, assertObjectMatch } from '../deps/asserts.ts'\nimport { loadSchema } from './loadSchema.ts'\n\nDeno.test('schema yaml loader', async (t) => {\n await t.step('reads in top level files document', async () => {\n const schemaDefs = await loadSchema()\n // Look for some stable fields in top level files\n if (\n typeof schemaDefs.rules.files.common === 'object' &&\n schemaDefs.rules.files.common.core !== null\n ) {\n const top_level = schemaDefs.rules.files.common.core as Record<\n string,\n any\n >\n if (top_level.hasOwnProperty('README')) {\n assertObjectMatch(top_level.README, {\n level: 'recommended',\n stem: 'README',\n extensions: ['', '.md', '.rst', '.txt'],\n })\n }\n } else {\n assert(false, 'failed to test schema defs')\n }\n })\n await t.step('loads all schema files', async () => {\n const schemaDefs = await loadSchema()\n if (\n !(typeof schemaDefs.objects === 'object') ||\n !(typeof schemaDefs.rules === 'object')\n ) {\n assert(false, 'failed to load objects/rules')\n }\n })\n})\n" }, { "alpha_fraction": 0.6511764526367188, "alphanum_fraction": 0.6517646908760071, "avg_line_length": 29.909090042114258, "blob_id": "fa7b18b385fe7f13d682f85329768771283c927a", "content_id": "7d4685b75bc707e41d842f0dde4026494baccd35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1700, "license_type": "permissive", "max_line_length": 70, "num_lines": 55, "path": "/bids-validator/validators/bids/start.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { version } from '../../package.json'\nimport BIDS from './obj'\nimport reset from './reset'\nimport quickTest from './quickTest'\nimport quickTestError from './quickTestError'\nimport fullTest from './fullTest'\nimport utils from '../../utils'\nimport { schemaRegex } from '../../validators/schemaTypes'\nimport { schemaSetup } from '../../utils/type'\n\n/**\n * Start\n *\n * Takes either a filelist array or\n * a path to a BIDS directory and an\n * options object and starts\n * the validation process and\n * returns the errors and warnings as\n * arguments to the callback.\n */\nconst start = (dir, options, callback) => {\n if (!options.json) {\n // eslint-disable-next-line\n console.log(`bids-validator@${version}`)\n }\n\n utils.options.parse(dir, options, async function (issues, options) {\n if (issues && issues.length > 0) {\n // option parsing issues\n callback({ config: issues })\n } else {\n 
BIDS.options = options\n reset(BIDS)\n // Load the bids-spec schema ahead of any validation\n let schema\n if (options.schema) {\n schema = await schemaRegex(options.schema)\n schemaSetup(schema)\n }\n const files = await utils.files.readDir(dir, options)\n if (quickTest(files)) {\n // Is the dir using git-annex?\n const annexed = utils.files.remoteFiles.isGitAnnex(dir)\n fullTest(files, BIDS.options, annexed, dir, schema, callback)\n } else {\n // Return an error immediately if quickTest fails\n const issue = quickTestError(dir)\n BIDS.summary.totalFiles = Object.keys(files).length\n callback(utils.issues.format([issue], BIDS.summary, options))\n }\n }\n })\n}\n\nexport default start\n" }, { "alpha_fraction": 0.5908962488174438, "alphanum_fraction": 0.5982471108436584, "avg_line_length": 23.393102645874023, "blob_id": "48aa646f25b92866c51ba63bebd3caf6ba347633", "content_id": "ff31d4a925f44c4753e79dd2e2d3374da0c15459", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 3537, "license_type": "permissive", "max_line_length": 86, "num_lines": 145, "path": "/bids-validator/src/utils/output.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Utilities for formatting human readable output (CLI or other UIs)\n */\nimport { prettyBytes } from '../deps/prettyBytes.ts'\nimport { Table } from '../deps/cliffy.ts'\nimport { colors } from '../deps/fmt.ts'\nimport { ValidationResult, SummaryOutput } from '../types/validation-result.ts'\nimport { Issue } from '../types/issues.ts'\n\ninterface LoggingOptions {\n verbose: boolean\n}\n\n/**\n * Format for Unix consoles\n *\n * Returns the full output string with newlines\n */\nexport function consoleFormat(\n result: ValidationResult,\n options?: LoggingOptions,\n): string {\n const output = []\n if (result.issues.size === 0) {\n output.push(colors.green('This dataset appears to be BIDS compatible.'))\n } else {\n result.issues.forEach((issue) => output.push(formatIssue(issue, options)))\n }\n output.push('')\n output.push(formatSummary(result.summary))\n output.push('')\n return output.join('\\n')\n}\n\n/**\n * Format one issue as text with colors\n */\nfunction formatIssue(issue: Issue, options?: LoggingOptions): string {\n const severity = issue.severity\n const color = severity === 'error' ? 'red' : 'yellow'\n const output = []\n output.push(\n '\\t' +\n colors[color](\n `[${severity.toUpperCase()}] ${issue.reason} (${issue.key})`,\n ),\n )\n output.push('')\n let fileOutCount = 0\n issue.files.forEach((file) => {\n if (!options?.verbose && fileOutCount > 2) {\n return\n }\n output.push('\\t\\t.' + file.path)\n if (file.line) {\n let msg = '\\t\\t\\t@ line: ' + file.line\n if (file.character) {\n msg += ' character: ' + file.character\n }\n output.push(msg)\n }\n if (file.evidence) {\n output.push('\\t\\t\\tEvidence: ' + file.evidence)\n }\n fileOutCount++\n })\n if (!options?.verbose) {\n output.push('')\n output.push('\\t\\t' + issue.files.size + ' more files with the same issue')\n }\n output.push('')\n if (issue.helpUrl) {\n output.push(\n colors.cyan(\n '\\t' +\n 'Please visit ' +\n issue.helpUrl +\n ' for existing conversations about this issue.',\n ),\n )\n output.push('')\n }\n return output.join('\\n')\n}\n\n/**\n * Format for the summary\n */\nfunction formatSummary(summary: SummaryOutput): string {\n const output = []\n const numSessions = summary.sessions.length > 0 ? 
summary.sessions.length : 1\n\n // data\n const column1 = [\n summary.totalFiles + ' ' + 'Files' + ', ' + prettyBytes(summary.size),\n summary.subjects.length +\n ' - ' +\n 'Subjects ' +\n numSessions +\n ' - ' +\n 'Sessions',\n ],\n column2 = summary.tasks,\n column3 = summary.modalities\n\n const longestColumn = Math.max(column1.length, column2.length, column3.length)\n const pad = ' '\n\n // headers\n const headers = [\n pad,\n colors.magenta('Summary:') + pad,\n colors.magenta('Available Tasks:') + pad,\n colors.magenta('Available Modalities:'),\n ]\n\n // rows\n const rows = []\n for (let i = 0; i < longestColumn; i++) {\n const val1 = column1[i] ? column1[i] + pad : ''\n const val2 = column2[i] ? column2[i] + pad : ''\n const val3 = column3[i] ? column3[i] : ''\n rows.push([pad, val1, val2, val3])\n }\n const table = new Table()\n .header(headers)\n .body(rows)\n .border(false)\n .padding(1)\n .indent(2)\n .toString()\n\n output.push(table)\n\n output.push('')\n\n //Neurostars message\n output.push(\n colors.cyan(\n '\\tIf you have any questions, please post on https://neurostars.org/tags/bids.',\n ),\n )\n\n return output.join('\\n')\n}\n" }, { "alpha_fraction": 0.7307692170143127, "alphanum_fraction": 0.7307692170143127, "avg_line_length": 17.571428298950195, "blob_id": "741dfc0f90061fe531afe1881974085cbe8a2fc5", "content_id": "7e19d4c8562f0c6bf0b092f7dedf590404cd8b7b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 130, "license_type": "permissive", "max_line_length": 33, "num_lines": 7, "path": "/bids-validator/validators/events/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import events from './events'\nimport validate from './validate'\n\nexport default {\n events: events,\n validateEvents: validate,\n}\n" }, { "alpha_fraction": 0.6718212962150574, "alphanum_fraction": 0.6723940372467041, "avg_line_length": 30.178571701049805, "blob_id": "dce4e01bc408efa8d346678486b4f94b7eb5d254", "content_id": "79374c954b92ff28a1de32e1a587f1b70b173aa0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1746, "license_type": "permissive", "max_line_length": 209, "num_lines": 56, "path": "/bids-validator/src/setup/options.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { LevelName, LogLevelNames } from '../deps/logger.ts'\nimport { Command, EnumType } from '../deps/cliffy.ts'\n\nexport type ValidatorOptions = {\n datasetPath: string\n schema?: string\n legacy?: boolean\n json?: boolean\n verbose?: boolean\n ignoreNiftiHeaders?: boolean\n filenameMode?: boolean\n debug: LevelName\n}\n\n/**\n * Parse command line options and return a ValidatorOptions config\n * @param argumentOverride Override the arguments instead of using Deno.args\n */\nexport async function parseOptions(\n argumentOverride: string[] = Deno.args,\n): Promise<ValidatorOptions> {\n const { args, options } = await new Command()\n .name('bids-validator')\n .type('debugLevel', new EnumType(LogLevelNames))\n .description(\n 'This tool checks if a dataset in a given directory is compatible with the Brain Imaging Data Structure specification. 
To learn more about Brain Imaging Data Structure visit http://bids.neuroimaging.io',\n )\n .arguments('<dataset_directory>')\n .version('alpha')\n .option('--json', 'Output machine readable JSON')\n .option(\n '-s, --schema <type:string>',\n 'Specify a schema version to use for validation',\n {\n default: 'latest',\n },\n )\n .option('-v, --verbose', 'Log more extensive information about issues')\n .option(\n '--ignoreNiftiHeaders',\n 'Disregard NIfTI header content during validation',\n )\n .option('--debug <type:debugLevel>', 'Enable debug output', {\n default: 'ERROR',\n })\n .option(\n '--filenameMode',\n 'Enable filename checks for newline separated filenames read from stdin',\n )\n .parse(argumentOverride)\n return {\n datasetPath: args[0],\n ...options,\n debug: options.debug as LevelName,\n }\n}\n" }, { "alpha_fraction": 0.6519337296485901, "alphanum_fraction": 0.6602209806442261, "avg_line_length": 26.846153259277344, "blob_id": "b998a1d60d50b1cd8c8b465f0ff9d13a5947cb60", "content_id": "750c448c564aacb10d23b7cc7e49a957d291ca45", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 362, "license_type": "permissive", "max_line_length": 71, "num_lines": 13, "path": "/bids-validator/validators/microscopy/checkSamples.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nconst checkSamples = (fileList) => {\n const issues = []\n const samplesFile = Array.from(Object.values(fileList)).find(\n (file) => file.relativePath && file.relativePath == '/samples.tsv',\n )\n if (!samplesFile) {\n issues.push(new Issue({ code: 214 }))\n }\n return issues\n}\nexport default checkSamples\n" }, { "alpha_fraction": 0.6345270872116089, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 20.780000686645508, "blob_id": "4f2b995130b677ff5124b10f4a12516cd7ad56b6", "content_id": "bf8a1c151f1798ee3f32d2af6ce2131e6dd0b571", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1089, "license_type": "permissive", "max_line_length": 74, "num_lines": 50, "path": "/bids-validator/src/utils/logger.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import {\n setup,\n handlers,\n LevelName,\n getLogger,\n Logger,\n} from '../deps/logger.ts'\n\n/**\n * Setup a console logger used with the --debug flag\n */\nexport function setupLogging(level: LevelName) {\n setup({\n handlers: {\n console: new handlers.ConsoleHandler(level),\n },\n\n loggers: {\n '@bids/validator': {\n level,\n handlers: ['console'],\n },\n },\n })\n}\n\nexport function parseStack(stack: string) {\n const lines = stack.split('\\n')\n const caller = lines[2].trim()\n const token = caller.split('at ')\n return token[1]\n}\n\nconst loggerProxyHandler = {\n // deno-lint-ignore no-explicit-any\n get: function (_: any, prop: keyof Logger) {\n const logger = getLogger('@bids/validator')\n const stack = new Error().stack\n if (stack) {\n const callerLocation = parseStack(stack)\n logger.debug(`Logger invoked at \"${callerLocation}\"`)\n }\n const logFunc = logger[prop] as typeof logger.warning\n return logFunc.bind(logger)\n },\n}\n\nconst logger = new Proxy(getLogger('@bids/validator'), loggerProxyHandler)\n\nexport { logger }\n" }, { "alpha_fraction": 0.6553352475166321, "alphanum_fraction": 0.6553352475166321, "avg_line_length": 27.62162208557129, "blob_id": "301eb30768a8b7602c8f7b6e694530d40a3a6dbe", 
"content_id": "e32504f92df2547c69aa88174c78b0c48a58c6bd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1059, "license_type": "permissive", "max_line_length": 59, "num_lines": 37, "path": "/bids-validator/src/main.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { parseOptions } from './setup/options.ts'\nimport { readFileTree } from './files/deno.ts'\nimport { fileListToTree } from './files/browser.ts'\nimport { resolve } from './deps/path.ts'\nimport { validate } from './validators/bids.ts'\nimport { consoleFormat } from './utils/output.ts'\nimport { setupLogging } from './utils/logger.ts'\n\nexport async function main() {\n const options = await parseOptions(Deno.args)\n setupLogging(options.debug)\n const absolutePath = resolve(options.datasetPath)\n const tree = await readFileTree(absolutePath)\n\n // Run the schema based validator\n const schemaResult = await validate(tree, options)\n\n if (options.json) {\n console.log(\n JSON.stringify(schemaResult, (key, value) => {\n if (value instanceof Map) {\n return Array.from(value.values())\n } else {\n return value\n }\n }),\n )\n } else {\n console.log(\n consoleFormat(schemaResult, {\n verbose: options.verbose ? options.verbose : false,\n }),\n )\n }\n}\n\nexport { validate, fileListToTree }\n" }, { "alpha_fraction": 0.5771807432174683, "alphanum_fraction": 0.5824389457702637, "avg_line_length": 32.13365173339844, "blob_id": "7324c8e0d9562460a4a6264dcda8657e14a6fcd3", "content_id": "9d511479ecfeade066013365667e14a92a43ce69", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 13883, "license_type": "permissive", "max_line_length": 232, "num_lines": 419, "path": "/bids-validator/validators/tsv/validateTsvColumns.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\nimport nonCustomColumns from '../../bids_validator/tsv/non_custom_columns.json'\n\n/**\n * @param {Object} file - BIDS file object\n * Accepts file object and returns a type based on file path\n */\nexport const getTsvType = function (file) {\n let tsvType = 'misc'\n if (file.relativePath.includes('phenotype/')) {\n tsvType = 'phenotype'\n } else if (file.name === 'participants.tsv') {\n tsvType = 'participants'\n } else if (\n file.name.endsWith('_channels.tsv') ||\n file.name.endsWith('_electrodes.tsv') ||\n file.name.endsWith('_events.tsv') ||\n file.name.endsWith('_scans.tsv') ||\n file.name.endsWith('_sessions.tsv') ||\n file.name.endsWith('_aslcontext.tsv') ||\n file.name.endsWith('_blood.tsv') ||\n file.name.endsWith('_optodes.tsv')\n ) {\n const split = file.name.split('_')\n tsvType = split[split.length - 1].replace('.tsv', '')\n }\n return tsvType\n}\n\nconst getHeaders = (tsvContents) =>\n tsvContents\n .replace(/^\\uefff/, '')\n .split('\\n')[0]\n .trim()\n .split('\\t')\n\n/**\n *\n * @param {array} headers -Array of column names\n * @param {string} type - Type from getTsvType\n * Checks TSV column names to determine if they're core or custom\n * Returns array of custom column names\n */\nconst getCustomColumns = function (headers, type) {\n const customCols = []\n // Iterate column headers\n for (let col of headers) {\n // If it's a custom column\n if (!nonCustomColumns[type].includes(col)) {\n customCols.push(col)\n }\n }\n return customCols\n}\nconst commaSeparatedStringOf = (items) =>\n 
items.map((item) => `\"${item}\"`).join(', ')\n\n/**\n * Loads relevant JSON schema for given tsv modalities.\n * Currently only required for pet_blood.\n * @param {*} tsvs\n * @returns\n */\nconst loadSchemas = (tsvs) => {\n const schemas = {}\n const getSchemaByType = {\n blood: () => require('../json/schemas/pet_blood.json'),\n }\n const types = new Set(tsvs.map((tsv) => getTsvType(tsv.file)))\n types.forEach((type) => {\n if (getSchemaByType.hasOwnProperty(type)) {\n schemas[type] = getSchemaByType[type]()\n }\n })\n return schemas\n}\n\n/**\n *\n * @param {array} tsvs - Array of objects containing TSV file objects and contents\n * @param {Object} jsonContentsDict\n */\nconst validateTsvColumns = function (tsvs, jsonContentsDict, headers) {\n const tsvIssues = []\n const schemas = loadSchemas(tsvs)\n\n tsvs.map((tsv) => {\n const tsvType = getTsvType(tsv.file)\n const customColumns = getCustomColumns(getHeaders(tsv.contents), tsvType)\n const isPetBlood = tsvType === 'blood'\n if (customColumns.length > 0 || isPetBlood) {\n // Get merged data dictionary for this file\n const potentialSidecars = utils.files.potentialLocations(\n tsv.file.relativePath.replace('.tsv', '.json'),\n )\n const mergedDict = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n const keys = Object.keys(mergedDict)\n // Gather undefined columns for the file\n const undefinedCols = customColumns.filter((col) => !keys.includes(col))\n // Create an issue for all undefined columns in this file\n undefinedCols.length &&\n tsvIssues.push(\n customColumnIssue(\n tsv.file,\n undefinedCols.join(', '),\n potentialSidecars,\n ),\n )\n\n if (isPetBlood) {\n // Check PET tsv headers required by json sidecar\n const petBloodHeaderIssues = validatePetBloodHeaders(\n tsv,\n mergedDict,\n schemas['blood'],\n )\n tsvIssues.push(...petBloodHeaderIssues)\n }\n }\n })\n // Return array of all instances of undescribed custom columns\n\n // Manage custom instances made from asl_context\n const aslTsvIssues = validateASL(tsvs, jsonContentsDict, headers)\n tsvIssues.push(...aslTsvIssues)\n\n return tsvIssues\n}\n\n/**\n * Validates that tsv columns required by\n * @param {*} tsv\n * @param {*} mergedDict\n * @param {*} schema\n * @returns\n */\nexport const validatePetBloodHeaders = (tsv, mergedDict, schema) => {\n const tsvIssues = []\n const headers = getHeaders(tsv.contents)\n\n // Collect required headers and the JSON sidecar properties that require them.\n const requiredHeaders = {}\n Object.entries(schema.properties).forEach(([property, subSchema]) => {\n if (\n subSchema.hasOwnProperty('requires_tsv_non_custom_columns') &&\n mergedDict[property] === true\n ) {\n subSchema.requires_tsv_non_custom_columns.forEach((header) => {\n if (header in requiredHeaders) {\n requiredHeaders[header].push(property)\n } else {\n requiredHeaders[header] = [property]\n }\n })\n }\n })\n Object.entries(requiredHeaders).forEach(([requiredHeader, requiredBy]) => {\n if (!headers.includes(requiredHeader)) {\n tsvIssues.push(\n new Issue({\n code: 211,\n file: tsv.file,\n evidence: `${tsv.file.name} has headers: ${commaSeparatedStringOf(\n headers,\n )}; missing header \"${requiredHeader}\", which is required when any of the properties (${commaSeparatedStringOf(\n requiredBy,\n )}) are true in the associated JSON sidecar.`,\n }),\n )\n }\n })\n return tsvIssues\n}\n\nconst validateASL = (tsvs, jsonContentsDict, headers) => {\n const tsvIssues = []\n // Manage custom instances from asl_context tsv files\n // get all 
headers associated with asl_context data\n tsvs.map((tsv) => {\n const aslHeaders = headers.filter((header) => {\n const file = header[0]\n return file.relativePath.includes('_asl')\n })\n\n aslHeaders.forEach((aslHeader) => {\n // extract the fourth element of 'dim' field of header - this is the\n // number of volumes that were obtained during scan (numVols)\n const file = aslHeader[0]\n const header = aslHeader[1]\n const dim = header.dim\n const numVols = dim[4]\n\n // get the _asl_context.tsv associated with this asl scan\n const potentialAslContext = utils.files.potentialLocations(\n file.relativePath\n .replace('.gz', '')\n .replace('asl.nii', 'aslcontext.tsv'),\n )\n const associatedAslContext = potentialAslContext.indexOf(\n tsv.file.relativePath,\n )\n\n if (associatedAslContext > -1) {\n const rows = tsv.contents\n .replace(/[\\r]+/g, '')\n .split('\\n')\n .filter((row) => !(!row || /^\\s*$/.test(row)))\n\n const m0scan_filters = ['m0scan']\n const filtered_m0scan_rows = rows.filter((row) =>\n m0scan_filters.includes(row),\n )\n\n const asl_filters = [\n 'cbf',\n 'm0scan',\n 'label',\n 'control',\n 'deltam',\n 'volume_type',\n ]\n const filtered_tsv_rows = rows.filter((row) =>\n asl_filters.includes(row),\n )\n if (rows.length != filtered_tsv_rows.length) {\n tsvIssues.push(\n new Issue({\n code: 176,\n file: file,\n }),\n )\n }\n\n if (rows.length - 1 != numVols) {\n tsvIssues.push(\n new Issue({\n code: 165,\n file: file,\n }),\n )\n }\n\n // get the json sidecar dictionary associated with that nifti scan\n var potentialSidecars = utils.files.potentialLocations(\n tsv.file.relativePath.replace('aslcontext.tsv', 'asl.json'),\n )\n\n // get merged data dictionary for this file\n const mergedDict = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n\n // check M0Type and tsv list for m0scan in case of an Included M0Type\n if (\n mergedDict.hasOwnProperty('M0Type') &&\n mergedDict['M0Type'] === 'Included' &&\n filtered_m0scan_rows.length < 1\n ) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 154,\n reason:\n \"''M0Type' is set to 'Included' however the tsv file does not contain any m0scan volume.\",\n }),\n )\n }\n // check M0Type and tsv list for m0scan in case of an Absent M0Type\n if (\n mergedDict.hasOwnProperty('M0Type') &&\n mergedDict['M0Type'] === 'Absent' &&\n filtered_m0scan_rows.length >= 1\n ) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 199,\n reason:\n \"''M0Type' is set to 'Absent' however the tsv file contains an m0scan volume. This should be avoided.\",\n }),\n )\n }\n\n // check Flip Angle requirements with LookLocker acquisitions\n if (\n mergedDict.hasOwnProperty('FlipAngle') &&\n mergedDict['FlipAngle'].constructor === Array\n ) {\n let FlipAngle = mergedDict['FlipAngle']\n const FlipAngleLength = FlipAngle.length\n if (FlipAngleLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 172,\n reason:\n \"''FlipAngle' for this file does not match the TSV length. 
Please make sure that the size of the FlipAngle array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n // check Labelling Duration matching with TSV length only for PCASL or CASL\n if (\n mergedDict.hasOwnProperty('LabelingDuration') &&\n mergedDict['LabelingDuration'].constructor === Array &&\n mergedDict.hasOwnProperty('ArterialSpinLabelingType') &&\n (mergedDict['ArterialSpinLabelingType'] == 'CASL' ||\n mergedDict['ArterialSpinLabelingType'] == 'PCASL')\n ) {\n let LabelingDuration = mergedDict['LabelingDuration']\n const LabelingDurationLength = LabelingDuration.length\n if (LabelingDurationLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 175,\n reason:\n \"''LabelingDuration' for this file does not match the TSV length. Please be sure that the size of the LabelingDuration array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n\n // check VolumeTiming with TSV length\n if (\n mergedDict.hasOwnProperty('RepetitionTimePreparation') &&\n mergedDict['RepetitionTimePreparation'].constructor === Array\n ) {\n let RepetitionTimePreparation =\n mergedDict['RepetitionTimePreparation']\n const RepetitionTimePreparationLength =\n RepetitionTimePreparation.length\n if (RepetitionTimePreparationLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 177,\n reason:\n \"''RepetitionTimePreparation' for this file do not match the TSV length. Please be sure that the size of the RepetitionTimePreparation array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n\n // check Post Labelling Delays matching with TSV length\n if (\n mergedDict.hasOwnProperty('PostLabelingDelay') &&\n mergedDict['PostLabelingDelay'].constructor === Array\n ) {\n let PostLabelingDelay = mergedDict['PostLabelingDelay']\n const PostLabelingDelayLength = PostLabelingDelay.length\n if (PostLabelingDelayLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 174,\n reason:\n \"''PostLabelingDelay' for this file do not match the TSV length. Please be sure that the size of the PostLabelingDelay array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n\n if (mergedDict.hasOwnProperty('TotalAcquiredVolumes')) {\n let TotalAcquiredVolumes = mergedDict['TotalAcquiredVolumes']\n const TotalAcquiredVolumesLength = TotalAcquiredVolumes.length\n if (TotalAcquiredVolumesLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 181,\n reason:\n \"''TotalAcquiredVolumes' for this file do not match the TSV length. Please be sure that the size of the TotalAcquiredVolumes array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n\n if (\n mergedDict.hasOwnProperty('EchoTime') &&\n mergedDict['EchoTime'].constructor === Array\n ) {\n let EchoTime = mergedDict['EchoTime']\n const EchoTimeLength = EchoTime.length\n if (EchoTimeLength !== rows.length - 1) {\n tsvIssues.push(\n new Issue({\n file: file,\n code: 196,\n reason:\n \"''EchoTime' for this file do not match the TSV length. 
Please be sure that the size of the EchoTime array in the json corresponds to the number of volume listed in the tsv file.\",\n }),\n )\n }\n }\n }\n })\n })\n return tsvIssues\n}\n\nconst customColumnIssue = function (file, col, locations) {\n return new Issue({\n code: 82,\n file: file,\n evidence:\n 'Columns: ' +\n col +\n ' not defined, please define in: ' +\n locations.toString().replace(',', ', '),\n })\n}\n\nexport default validateTsvColumns\n" }, { "alpha_fraction": 0.5747899413108826, "alphanum_fraction": 0.5806722640991211, "avg_line_length": 31.16216278076172, "blob_id": "4586c37a136597c0e370968ae2138a16c897f5ab", "content_id": "de9368ef0c788d051ea4b31994ed5a15a3777d16", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1190, "license_type": "permissive", "max_line_length": 79, "num_lines": 37, "path": "/bids-validator/validators/tsv/checkStatusCol.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\n/**\n * Checks status column in a electroencephalography _channels.tsv file to\n * ensure its values are only * 'good', 'bad', or 'n/a'\n * @param {string[]} rows - Each row of a tsv file to be checked.\n * @param {Object} file - File of rows being checked, used for error message if\n * problem is found.\n * @param {Object[]} issues - Array of issue objects to add to if problem is\n * found.\n * @returns {null} Results of this function are stored in issues.\n */\nconst checkStatusCol = function (rows, file, issues) {\n const header = rows[0]\n const statusColumn = header.indexOf('status')\n if (statusColumn !== -1) {\n for (let i = 1; i < rows.length; i++) {\n const line = rows[i]\n const status = line[statusColumn]\n if (status !== 'good' && status !== 'bad' && status != 'n/a') {\n issues.push(\n new Issue({\n file: file,\n evidence: line.toString(),\n line: i + 1,\n reason:\n 'the status column values should either be good, bad, or n/a',\n code: 125,\n }),\n )\n }\n }\n }\n return\n}\n\nexport default checkStatusCol\n" }, { "alpha_fraction": 0.6858974099159241, "alphanum_fraction": 0.6858974099159241, "avg_line_length": 16.33333396911621, "blob_id": "e567a14204a2cc9ce815389ff938feedcbd232c1", "content_id": "e5555e84126fcc46790269359862c8f5288fe955", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 156, "license_type": "permissive", "max_line_length": 33, "num_lines": 9, "path": "/bids-validator/validators/json/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import json from './json'\nimport validate from './validate'\nimport load from './load'\n\nexport default {\n json: json,\n validate: validate,\n load: load,\n}\n" }, { "alpha_fraction": 0.5863813161849976, "alphanum_fraction": 0.6035992503166199, "avg_line_length": 27.79551887512207, "blob_id": "fd8869de3fb77ecee3e01aa37bbdc40c06b41219", "content_id": "40a6e88b3e73c4da6c53f3691a39ebb7bf458aca", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10280, "license_type": "permissive", "max_line_length": 116, "num_lines": 357, "path": "/bids-validator/tests/bids.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * eslint no-console: [\"error\", { allow: [\"log\"] }]\n */\nimport { assert } from 'chai'\n\nimport validate from '../index.js'\nimport fs from 'fs'\nimport 
path from 'path'\nimport { createFileList } from './env/FileList.js'\nimport isNode from '../utils/isNode.js'\n\nfunction getDirectories(srcpath) {\n return fs.readdirSync(srcpath).filter(function (file) {\n return (\n file !== '.git' && fs.statSync(path.join(srcpath, file)).isDirectory()\n )\n })\n}\n\nconst missing_session_files = [\n '7t_trt',\n 'ds004332',\n 'ds006',\n 'ds007',\n 'ds008',\n 'ds051',\n 'ds052',\n 'ds105',\n 'ds109',\n 'ds113b',\n 'ds000117',\n 'ds000247',\n 'ieeg_motorMiller2007',\n 'ieeg_visual',\n 'eeg_ds003654s_hed_inheritance',\n 'eeg_ds003645s_hed_inheritance',\n 'motion_dualtask',\n]\n\nconst dataDirectory = path.join('bids-validator', 'tests', 'data')\n\n// Generate validate.BIDS input for included minimal tests\nfunction createDatasetFileList(inputPath) {\n const testDatasetPath = path.join(dataDirectory, inputPath)\n if (!isNode) {\n return createFileList(testDatasetPath)\n } else {\n return testDatasetPath\n }\n}\n\n// Generate validate.BIDS input for bids-examples\nfunction createExampleFileList(inputPath) {\n return createDatasetFileList(path.join('bids-examples', inputPath))\n}\n\nfunction assertErrorCode(errors, expected_error_code) {\n const matchingErrors = errors.filter(function (error) {\n return error.code === expected_error_code\n })\n assert(matchingErrors.length > 0)\n}\n\ndescribe('BIDS example datasets ', function () {\n // Default validate.BIDS options\n const options = { ignoreNiftiHeaders: true, json: true }\n const enableNiftiHeaders = { json: true }\n\n describe('basic example dataset tests', () => {\n const bidsExamplePath = path.join(dataDirectory, 'bids-examples')\n getDirectories(bidsExamplePath).forEach(function testDataset(inputPath) {\n it(inputPath, (isdone) => {\n validate.BIDS(\n createExampleFileList(inputPath),\n options,\n function (issues) {\n let warnings = issues.warnings\n let session_flag = false\n for (const warning in warnings) {\n if (warnings[warning]['code'] === 38) {\n session_flag = true\n break\n }\n }\n if (missing_session_files.indexOf(inputPath) === -1) {\n assert.deepEqual(session_flag, false)\n } else {\n assert.deepEqual(session_flag, true)\n }\n isdone()\n },\n )\n })\n })\n })\n\n // we need to have at least one non-dynamic test\n it('validates path without trailing backslash', function (isdone) {\n validate.BIDS(\n createExampleFileList('ds001'),\n options,\n function (issues, summary) {\n const errors = issues.errors\n const warnings = issues.warnings\n assert(summary.sessions.length === 0)\n assert(summary.subjects.length === 16)\n assert.deepEqual(summary.tasks, ['balloon analog risk task'])\n expect(summary.modalities).toEqual(['MRI'])\n assert(summary.totalFiles === 134)\n assert.deepEqual(errors.length, 1)\n assert(warnings.length === 2)\n assert(\n warnings.findIndex((warning) => warning.code === 13) > -1,\n 'warnings do not contain a code 13',\n )\n isdone()\n },\n )\n })\n\n // we need to have at least one non-dynamic test\n it('validates dataset with valid nifti headers', function (isdone) {\n const options = { ignoreNiftiHeaders: false }\n validate.BIDS(\n createDatasetFileList('valid_headers'),\n options,\n function (issues, summary) {\n const errors = issues.errors\n const warnings = issues.warnings\n assert(summary.sessions.length === 0)\n assert(summary.subjects.length === 1)\n assert.deepEqual(summary.tasks, ['rhyme judgment'])\n assert.isFalse(summary.dataProcessed)\n expect(summary.modalities).toEqual(['MRI'])\n expect(summary.totalFiles).toEqual(8)\n assert(\n errors.findIndex((error) => 
error.code === 60) > -1,\n 'errors do not contain a code 60',\n )\n assert.deepEqual(warnings.length, 4)\n assert(\n warnings.findIndex((warning) => warning.code === 13) > -1,\n 'warnings do not contain a code 13',\n )\n assert.deepEqual(summary.subjectMetadata[0], {\n age: 25,\n participantId: '01',\n sex: 'M',\n })\n isdone()\n },\n )\n })\n\n // test for duplicate files present with both .nii and .nii.gz extension\n it('validates dataset for duplicate files present with both .nii and .nii.gz extension', function (isdone) {\n validate.BIDS(\n createDatasetFileList('valid_filenames'),\n enableNiftiHeaders,\n function (issues) {\n assertErrorCode(issues.errors, 74)\n isdone()\n },\n )\n })\n\n it('includes issue 53 NO_T1W for dataset without T1w files', function (isdone) {\n validate.BIDS(createDatasetFileList('no_t1w'), options, function (issues) {\n assertErrorCode(issues.ignored, 53)\n isdone()\n })\n })\n\n // test for illegal characters used in acq and task name\n it('validates dataset with illegal characters in task name', function (isdone) {\n validate.BIDS(\n createDatasetFileList('valid_filenames'),\n enableNiftiHeaders,\n function (issues) {\n assertErrorCode(issues.errors, 58)\n isdone()\n },\n )\n })\n\n // test for illegal characters used in sub name\n it('validates dataset with illegal characters in sub name', function (isdone) {\n validate.BIDS(\n createDatasetFileList('valid_filenames'),\n enableNiftiHeaders,\n function (issues) {\n assertErrorCode(issues.errors, 64)\n isdone()\n },\n )\n })\n\n it('checks for subjects with no valid data', function (isdone) {\n validate.BIDS(\n createDatasetFileList('no_valid_data'),\n options,\n function (issues) {\n assertErrorCode(issues.errors, 67)\n isdone()\n },\n )\n })\n\n it('validates MRI modalities', function (isdone) {\n validate.BIDS(\n createExampleFileList('ds001'),\n options,\n function (issues, summary) {\n var errors = issues.errors\n var warnings = issues.warnings\n assert(summary.sessions.length === 0)\n assert(summary.subjects.length === 16)\n assert.deepEqual(summary.tasks, ['balloon analog risk task'])\n assert(summary.modalities.includes('MRI'))\n assert(summary.totalFiles === 134)\n assert.deepEqual(errors.length, 1)\n assert(warnings.length === 2)\n assert(\n warnings.findIndex((warning) => warning.code === 13) > -1,\n 'warnings do not contain a code 13',\n )\n isdone()\n },\n )\n })\n\n it('blacklists modalities specified', function (isdone) {\n const _options = { ...options, blacklistModalities: ['MRI'] }\n validate.BIDS(\n createExampleFileList('ds001'),\n _options,\n function (issues, summary) {\n var errors = issues.errors\n var warnings = issues.warnings\n assert(summary.sessions.length === 0)\n assert(summary.subjects.length === 16)\n assert.deepEqual(summary.tasks, ['balloon analog risk task'])\n assert(summary.modalities.includes('MRI'))\n assert(summary.totalFiles === 134)\n assert.deepEqual(errors.length, 2)\n assert(warnings.length === 2)\n assert(\n warnings.findIndex((warning) => warning.code === 13) > -1,\n 'warnings do not contain a code 13',\n )\n assert(\n errors.findIndex((error) => error.code === 139) > -1,\n 'errors do contain a code 139',\n )\n\n isdone()\n },\n )\n })\n\n it('checks for data dictionaries without corresponding data files', function (isdone) {\n validate.BIDS(\n createDatasetFileList('unused_data_dict'),\n options,\n function (issues) {\n assert.notEqual(\n issues.errors.findIndex((issue) => issue.code === 90),\n -1,\n )\n isdone()\n },\n )\n })\n\n it('checks for fieldmaps 
with no _magnitude file', function (isdone) {\n validate.BIDS(\n createDatasetFileList('fieldmap_without_magnitude'),\n options,\n function (issues) {\n assert.notEqual(\n issues.errors.findIndex((issue) => issue.code === 91),\n -1,\n )\n isdone()\n },\n )\n })\n\n it('should not throw a warning if all _phasediff.nii are associated with _magnitude1.nii', function (isdone) {\n validate.BIDS(\n createExampleFileList('hcp_example_bids'),\n options,\n function (issues) {\n assert.deepEqual(issues.errors, [])\n isdone()\n },\n )\n })\n\n it('should throw a warning if there are _phasediff.nii without an associated _magnitude1.nii', function (isdone) {\n validate.BIDS(\n createDatasetFileList('phasediff_without_magnitude1'),\n options,\n function (issues) {\n assert.notEqual(issues.warnings.findIndex((issue) => issue.code === 92))\n isdone()\n },\n )\n })\n\n it('should not throw an error if it encounters no non-utf-8 files', function (isdone) {\n validate.BIDS(\n createDatasetFileList('valid_dataset'),\n options,\n function (issues) {\n assert.equal(\n issues.errors.findIndex((issue) => issue.code === 123),\n -1,\n )\n isdone()\n },\n )\n })\n\n it('should validate pet data', function (isdone) {\n validate.BIDS(\n createDatasetFileList('broken_pet_example_2-pet_mri'),\n options,\n function (issues) {\n assertErrorCode(issues.errors, 55)\n isdone()\n },\n )\n })\n\n it('should validate pet blood data', function (isdone) {\n validate.BIDS(\n createDatasetFileList('broken_pet_example_3-pet_blood'),\n options,\n function (issues) {\n assertErrorCode(issues.errors, 55)\n isdone()\n },\n )\n })\n\n it('should catch missing tsv columns', function (isdone) {\n validate.BIDS(\n createDatasetFileList('pet_blood_missing_tsv_column'),\n options,\n function (issues) {\n assertErrorCode(issues.errors, 211)\n isdone()\n },\n )\n })\n})\n" }, { "alpha_fraction": 0.5584415793418884, "alphanum_fraction": 0.559969425201416, "avg_line_length": 24.66666603088379, "blob_id": "67c7c06298b8c8daf26e5a96fbe908f9136c2c8f", "content_id": "d400fa249cfd1a23beb3d5fa938ae224755c83a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1309, "license_type": "permissive", "max_line_length": 78, "num_lines": 51, "path": "/bids-validator-web/components/Validate.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport Upload from './Upload.jsx'\nimport Spinner from './Spinner.jsx'\nimport Options from './Options.jsx'\n\nclass Validate extends React.Component {\n constructor(props) {\n super(props)\n this.state = {\n loading: false,\n }\n\n this._clearInput = this.props.reset\n this._onChange = this.props.onChange\n this._setRefs = this.props.setRefs\n }\n\n // life cycle events --------------------------------------------------\n\n render() {\n let { options, handleOptionToggle } = this.props\n let loading = <Spinner text=\"validating\" active={true} />\n\n let select = (\n <div className=\"card-header\">\n <h3>\n Select a{' '}\n <a href=\"https://bids.neuroimaging.io\" target=\"_blank\">\n BIDS dataset\n </a>{' '}\n to validate\n </h3>\n <Upload\n onClick={this._clearInput}\n onChange={this._onChange}\n setRefs={this._setRefs}\n />\n <hr />\n <Options setOption={handleOptionToggle} options={options} />\n <small>\n Note: Selecting a dataset only performs validation. Files are never\n uploaded.\n </small>\n </div>\n )\n\n return <div className=\"card\">{this.props.loading ? 
loading : select}</div>\n }\n}\n\nexport default Validate\n" }, { "alpha_fraction": 0.5718598961830139, "alphanum_fraction": 0.573369562625885, "avg_line_length": 30.245283126831055, "blob_id": "b11a1eac19f61544a9e8d9cf460cf7b8ce3eebdc", "content_id": "77a66f0c77369ee5a23eb3bd72bb5a04a99240ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3312, "license_type": "permissive", "max_line_length": 74, "num_lines": 106, "path": "/bids-validator/utils/options.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import path from 'path'\nimport getFileStats from '../utils/files/getFileStats'\nimport readFile from '../utils/files/readFile'\nimport json from './json'\nimport isNode from '../utils/isNode.js'\n\nlet options\n\nexport default {\n /**\n * Parse\n */\n parse: function (dir, args, callback) {\n options = args ? args : {}\n options = {\n ignoreWarnings: Boolean(options.ignoreWarnings),\n ignoreNiftiHeaders: Boolean(options.ignoreNiftiHeaders),\n ignoreSymlinks: Boolean(options.ignoreSymlinks),\n ignoreSubjectConsistency: Boolean(options.ignoreSubjectConsistency),\n blacklistModalities: options.blacklistModalities,\n verbose: Boolean(options.verbose),\n gitTreeMode: Boolean(options.gitTreeMode),\n remoteFiles: Boolean(options.remoteFiles),\n gitRef: options.gitRef || 'HEAD',\n config: options.config || {},\n schema: options.schema !== 'disable' ? options.schema : false,\n }\n if (options.config && typeof options.config !== 'boolean') {\n this.parseConfig(dir, options.config, function (issues, config) {\n options.config = config\n callback(issues, options)\n })\n } else {\n callback(null, options)\n }\n },\n\n getOptions: () => {\n const readonlyOptions = Object.freeze({ ...options })\n return readonlyOptions\n },\n\n /**\n * Load Config\n */\n loadConfig: function (dir, config, callback) {\n if (typeof config === 'string') {\n let configFile\n if (isNode) {\n const configPath = path.isAbsolute(config)\n ? config\n : path.join(dir, config)\n configFile = { path: configPath }\n } else {\n // Grab file from FileList if a path was provided\n configFile = [...dir].find((f) => f.webkitRelativePath === config)\n // No matching config, return a default\n if (!configFile) {\n return callback(null, configFile, JSON.stringify({}))\n }\n }\n configFile.stats = getFileStats(configFile)\n readFile(configFile)\n .then((contents) => {\n callback(null, configFile, contents)\n })\n .catch((issue) => {\n // If the config does not exist, issue 44 is returned\n if (issue.code === 44) {\n callback(null, configFile, JSON.stringify({}))\n } else {\n callback([issue], configFile, null)\n }\n })\n } else if (typeof config === 'object') {\n callback(null, { path: 'config' }, JSON.stringify(config))\n }\n },\n\n /**\n * Parse Config\n */\n parseConfig: function (dir, config, callback) {\n this.loadConfig(dir, config, function (issues, file, contents) {\n if (issues) {\n callback(issues, null)\n } else {\n json.parse(file, contents).then(({ issues, parsed: jsObj }) => {\n if (issues && issues.length > 0) {\n callback(issues, null)\n } else {\n const parsedConfig = {\n ignore: jsObj.ignore ? [].concat(jsObj.ignore) : [],\n warn: jsObj.warn ? [].concat(jsObj.warn) : [],\n error: jsObj.error ? [].concat(jsObj.error) : [],\n ignoredFiles: jsObj.ignoredFiles\n ? 
[].concat(jsObj.ignoredFiles)\n : [],\n }\n callback(null, parsedConfig)\n }\n })\n }\n })\n },\n}\n" }, { "alpha_fraction": 0.6581963300704956, "alphanum_fraction": 0.6586945652961731, "avg_line_length": 22.611764907836914, "blob_id": "5332047157071d5bdfe24121405840d15b5fd028", "content_id": "40232a748e86c39c7e4aa7f7dac03029a0289bfa", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": true, "language": "JavaScript", "length_bytes": 2007, "license_type": "permissive", "max_line_length": 83, "num_lines": 85, "path": "/bids-validator/tests/env/FileList.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * for use in test suites using File & FileList browser APIs in jsdom environment\n */\n\nimport fs from 'fs'\n\nimport path from 'path'\nimport mime from 'mime-types'\n\nfunction createFileList(dir) {\n const str = dir.substr(dir.lastIndexOf(path.sep) + 1) + '$'\n const rootpath = dir.replace(new RegExp(str), '')\n const paths = getFilepaths(dir, [], rootpath)\n return paths.map((path) => {\n return createFile(path, path.replace(rootpath, ''))\n })\n}\n\nfunction getFilepaths(dir, files_) {\n files_ = files_ || []\n const files = fs.readdirSync(dir)\n files\n .map((file) => path.join(dir, file))\n .map((path) =>\n isDirectory(path) ? getFilepaths(path, files_) : files_.push(path),\n )\n return files_\n}\n\nfunction isDirectory(path) {\n const pathStat = fs.lstatSync(path)\n let isDir = pathStat.isDirectory()\n if (pathStat.isSymbolicLink()) {\n try {\n var targetPath = fs.realpathSync(path)\n isDir = fs.lstatSync(targetPath).isDirectory()\n } catch (err) {\n isDir = false\n }\n }\n return isDir\n}\n\nfunction addFileList(input, file_paths) {\n if (typeof file_paths === 'string') file_paths = [file_paths]\n else if (!Array.isArray(file_paths)) {\n throw new Error(\n 'file_paths needs to be a file path string or an Array of file path strings',\n )\n }\n\n const file_list = file_paths.map((fp) => createFile(fp))\n file_list.__proto__ = Object.create(FileList.prototype)\n\n Object.defineProperty(input, 'files', {\n value: file_list,\n writable: false,\n })\n\n return input\n}\n\nfunction createFile(file_path, relativePath) {\n const file = fs.statSync(file_path)\n\n const browserFile = new File(\n [new fs.readFileSync(file_path)],\n path.basename(file_path),\n {\n type: mime.lookup(file_path) || '',\n lastModified: file.mtimeMs,\n },\n )\n browserFile.webkitRelativePath = relativePath || file_path\n\n return browserFile\n}\n\nexport { addFileList, createFile, createFileList }\n\nexport default {\n addFileList,\n createFile,\n createFileList,\n}\n" }, { "alpha_fraction": 0.6932951807975769, "alphanum_fraction": 0.7133534550666809, "avg_line_length": 76.57469177246094, "blob_id": "7552f3409cc4d3d80aa487f56aa7abe597b2c370", "content_id": "7e61645d390df5508d661db5991b675281fdbae2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 37778, "license_type": "permissive", "max_line_length": 1370, "num_lines": 482, "path": "/bids-validator/README.md", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "[![Node Tests](https://github.com/bids-standard/bids-validator/actions/workflows/node_tests.yml/badge.svg)](https://github.com/bids-standard/bids-validator/actions/workflows/node_tests.yml)\n[![Python 
tests](https://github.com/bids-standard/bids-validator/actions/workflows/python_tests.yml/badge.svg)](https://github.com/bids-standard/bids-validator/actions/workflows/python_tests.yml)\n[![bids-examples tests](https://github.com/bids-standard/bids-validator/actions/workflows/test-bids-examples.yml/badge.svg)](https://github.com/bids-standard/bids-validator/actions/workflows/test-bids-examples.yml)\n[![CircleCI](https://circleci.com/gh/bids-standard/bids-validator.svg?style=shield&circle-token=:circle-token)](https://circleci.com/gh/bids-standard/bids-validator)\n[![Codecov](https://codecov.io/gh/bids-standard/bids-validator/branch/master/graph/badge.svg)](https://codecov.io/gh/bids-standard/bids-validator)\n[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3688707.svg)](https://doi.org/10.5281/zenodo.3688707)\n\n# BIDS-Validator\n\n- [BIDS-Validator](#bids-validator)\n - [Quickstart](#quickstart)\n - [Support](#support)\n - [Maintainers and Contributors](#maintainers-and-contributors)\n - [Use](#use)\n - [API](#api)\n - [.bidsignore](#bidsignore)\n - [Configuration](#configuration)\n - [In the Browser](#in-the-browser)\n - [On the Server](#on-the-server)\n - [Through Command Line](#through-command-line)\n - [Docker image](#docker-image)\n - [Python Library](#python-library)\n - [Example](#example)\n - [Development](#development)\n - [Running Locally in a Browser](#running-locally-in-a-browser)\n - [Testing](#testing)\n - [Publishing](#publishing)\n - [Acknowledgments](#acknowledgments)\n\n## Quickstart\n\n1. Web version:\n 1. Open [Google Chrome](https://www.google.com/chrome/) or\n [Mozilla Firefox](https://mozilla.org/firefox) (currently the only\n supported browsers)\n 1. Go to https://bids-standard.github.io/bids-validator/ and select a folder\n with your BIDS dataset. If the validator seems to be working longer than\n couple of minutes please open [developer tools ](https://developer.chrome.com/devtools)\n and report the error at [https://github.com/bids-standard/bids-validator/issues](https://github.com/bids-standard/bids-validator/issues).\n1. Command line version:\n 1. Install [Node.js](https://nodejs.org) (at least version 18.0.0)\n 1. Update `npm` to be at least version 7 (`npm install --global npm@^7`)\n 1. From a terminal run `npm install -g bids-validator`\n 1. Run `bids-validator` to start validating datasets.\n1. Docker\n 1. Install Docker\n 1. From a terminal run `docker run -ti --rm -v /path/to/data:/data:ro bids/validator /data`\n but replace the `/path/to/data` part of the command with your own path on your machine.\n1. Python Library:\n 1. Install [Python](https://www.python.org/)\n 1. Install [Pip](https://pip.pypa.io/en/stable/installing/) package manager for Python, if\n not already installed.\n 1. From a terminal run `pip install bids_validator` to acquire the\n [BIDS Validator PyPI package](https://pypi.org/project/bids-validator/)\n or `conda install bids-validator` for the\n [Conda package](https://anaconda.org/conda-forge/bids-validator).\n 1. Open a Python terminal and type: `python`\n 1. Import the BIDS Validator package `from bids_validator import BIDSValidator`\n 1. Check if a file is BIDS compatible `BIDSValidator().is_bids('path/to/a/bids/file')`\n\n## Support\n\nThe BIDS Validator is designed to work in both the browser and in Node.js. 
We\ntarget support for the latest long term stable (LTS) release of Node.js and the\nlatest version of Chrome.\n\nThere is also a library of helper functions written in Python, for use with BIDS\ncompliant applications written in this language.\n\nPlease report any issues you experience while using these support targets via\nthe [GitHub issue tracker](https://github.com/bids-standard/bids-validator/issues).\nIf you experience issues outside of these supported environments and believe we\nshould extend our targeted support feel free to open a new issue describing the\nissue, your support target and why you require extended support and we will\naddress these issues on a case by case basis.\n\n## Maintainers and Contributors\n\n<!-- ALL-CONTRIBUTORS-BADGE:START - Do not remove or modify this section -->\n\n[![All Contributors](https://img.shields.io/badge/all_contributors-43-orange.svg?style=flat-square)](#contributors-)\n\n<!-- ALL-CONTRIBUTORS-BADGE:END -->\n\nThis project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification.\nContributions of any kind are welcome!\n\nThe project is maintained by [@rwblair](https://github.com/rwblair/) with the help of many contributors listed below.\n(The [emoji key](https://allcontributors.org/docs/en/emoji-key) is indicating the kind of contribution)\n\nPlease also see [Acknowledgments](#acknowledgments).\n\n<!-- ALL-CONTRIBUTORS-LIST:START - Do not remove or modify this section -->\n<!-- prettier-ignore-start -->\n<!-- markdownlint-disable -->\n<table>\n <tr>\n <td align=\"center\"><a href=\"https://adam2392.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/3460267?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Adam Li</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=adam2392\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=adam2392\" title=\"Tests\">⚠️</a> <a href=\"#userTesting-adam2392\" title=\"User Testing\">📓</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Aadam2392\" title=\"Bug reports\">🐛</a></td>\n <td align=\"center\"><a href=\"https://github.com/agt24\"><img src=\"https://avatars.githubusercontent.com/u/7869017?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Adam Thomas</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=agt24\" title=\"Documentation\">📖</a></td>\n <td align=\"center\"><a href=\"http://happy5214.freedynamicdns.org/\"><img src=\"https://avatars.githubusercontent.com/u/2992751?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Alexander Jones</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=happy5214\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=happy5214\" title=\"Tests\">⚠️</a> <a href=\"#ideas-happy5214\" title=\"Ideas, Planning, & Feedback\">🤔</a></td>\n <td align=\"center\"><a href=\"https://github.com/musicinmybrain\"><img src=\"https://avatars.githubusercontent.com/u/6898909?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Ben Beasley</b></sub></a><br /><a href=\"#platform-musicinmybrain\" title=\"Packaging/porting to new platform\">📦</a></td>\n <td align=\"center\"><a href=\"http://chrisgorgolewski.org\"><img src=\"https://avatars.githubusercontent.com/u/238759?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Chris Gorgolewski</b></sub></a><br /><a 
href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Achrisgorgo\" title=\"Bug reports\">🐛</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=chrisgorgo\" title=\"Code\">💻</a> <a href=\"#data-chrisgorgo\" title=\"Data\">🔣</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=chrisgorgo\" title=\"Documentation\">📖</a> <a href=\"#example-chrisgorgo\" title=\"Examples\">💡</a> <a href=\"#ideas-chrisgorgo\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"#infra-chrisgorgo\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a> <a href=\"#maintenance-chrisgorgo\" title=\"Maintenance\">🚧</a> <a href=\"#mentoring-chrisgorgo\" title=\"Mentoring\">🧑‍🏫</a> <a href=\"#question-chrisgorgo\" title=\"Answering Questions\">💬</a> <a href=\"https://github.com/bids-standard/bids-validator/pulls?q=is%3Apr+reviewed-by%3Achrisgorgo\" title=\"Reviewed Pull Requests\">👀</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=chrisgorgo\" title=\"Tests\">⚠️</a> <a href=\"#tutorial-chrisgorgo\" title=\"Tutorials\">✅</a> <a href=\"#talk-chrisgorgo\" title=\"Talks\">📢</a> <a href=\"#userTesting-chrisgorgo\" title=\"User Testing\">📓</a></td>\n <td align=\"center\"><a href=\"https://github.com/choldgraf\"><img src=\"https://avatars.githubusercontent.com/u/1839645?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Chris Holdgraf</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=choldgraf\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/effigies\"><img src=\"https://avatars.githubusercontent.com/u/83442?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Chris Markiewicz</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=effigies\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=effigies\" title=\"Tests\">⚠️</a> <a href=\"#ideas-effigies\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Aeffigies\" title=\"Bug reports\">🐛</a> <a href=\"#question-effigies\" title=\"Answering Questions\">💬</a> <a href=\"#tool-effigies\" title=\"Tools\">🔧</a> <a href=\"#maintenance-effigies\" title=\"Maintenance\">🚧</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/david-nishi\"><img src=\"https://avatars.githubusercontent.com/u/28666458?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>David Nishikawa</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=david-nishi\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=david-nishi\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"https://github.com/DimitriPapadopoulos\"><img src=\"https://avatars.githubusercontent.com/u/3234522?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Dimitri Papadopoulos Orfanos</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=DimitriPapadopoulos\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://duncanmmacleod.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/1618530?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Duncan Macleod</b></sub></a><br /><a href=\"#infra-duncanmmacleod\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a></td>\n <td align=\"center\"><a href=\"https://github.com/franklin-feingold\"><img 
src=\"https://avatars.githubusercontent.com/u/35307458?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Franklin Feingold</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=franklin-feingold\" title=\"Documentation\">📖</a></td>\n <td align=\"center\"><a href=\"https://github.com/thinknoack\"><img src=\"https://avatars.githubusercontent.com/u/3342083?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Gregory noack</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=thinknoack\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=thinknoack\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"http://chymera.eu/\"><img src=\"https://avatars.githubusercontent.com/u/950524?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Horea Christian</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=TheChymera\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://kaczmarj.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/17690870?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Jakub Kaczmarzyk</b></sub></a><br /><a href=\"#infra-kaczmarj\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/jokedurnez\"><img src=\"https://avatars.githubusercontent.com/u/7630327?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Joke Durnez</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=jokedurnez\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"http://jasmainak.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/15852194?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Mainak Jas</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=jasmainak\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=jasmainak\" title=\"Tests\">⚠️</a> <a href=\"#ideas-jasmainak\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/pulls?q=is%3Apr+reviewed-by%3Ajasmainak\" title=\"Reviewed Pull Requests\">👀</a> <a href=\"#userTesting-jasmainak\" title=\"User Testing\">📓</a></td>\n <td align=\"center\"><a href=\"http://fair.dei.unipd.it/marco-castellaro\"><img src=\"https://avatars.githubusercontent.com/u/5088923?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Marco Castellaro</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=marcocastellaro\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=marcocastellaro\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"https://github.com/MaxvandenBoom\"><img src=\"https://avatars.githubusercontent.com/u/43676624?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Max</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=MaxvandenBoom\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3AMaxvandenBoom\" title=\"Bug reports\">🐛</a></td>\n <td align=\"center\"><a href=\"http://psychoinformatics.de/\"><img src=\"https://avatars.githubusercontent.com/u/136479?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Michael Hanke</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=mih\" 
title=\"Documentation\">📖</a></td>\n <td align=\"center\"><a href=\"https://github.com/naveau\"><img src=\"https://avatars.githubusercontent.com/u/1488318?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Mikael Naveau</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=naveau\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/nellh\"><img src=\"https://avatars.githubusercontent.com/u/11369795?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Nell Hardcastle</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=nellh\" title=\"Code\">💻</a> <a href=\"#ideas-nellh\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"#infra-nellh\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a> <a href=\"#question-nellh\" title=\"Answering Questions\">💬</a> <a href=\"https://github.com/bids-standard/bids-validator/pulls?q=is%3Apr+reviewed-by%3Anellh\" title=\"Reviewed Pull Requests\">👀</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/ntraut\"><img src=\"https://avatars.githubusercontent.com/u/22977927?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Nicolas Traut</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=ntraut\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://www.linkedin.com/in/parul-sethi\"><img src=\"https://avatars.githubusercontent.com/u/11822050?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Parul Sethi</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=parulsethi\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=parulsethi\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"https://github.com/patsycle\"><img src=\"https://avatars.githubusercontent.com/u/41481345?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Patricia Clement</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=patsycle\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://remi-gau.github.io/\"><img src=\"https://avatars.githubusercontent.com/u/6961185?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Remi Gau</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=Remi-Gau\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=Remi-Gau\" title=\"Documentation\">📖</a> <a href=\"#userTesting-Remi-Gau\" title=\"User Testing\">📓</a></td>\n <td align=\"center\"><a href=\"https://hoechenberger.net/\"><img src=\"https://avatars.githubusercontent.com/u/2046265?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Richard Höchenberger</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=hoechenberger\" title=\"Code\">💻</a> <a href=\"#userTesting-hoechenberger\" title=\"User Testing\">📓</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=hoechenberger\" title=\"Tests\">⚠️</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Ahoechenberger\" title=\"Bug reports\">🐛</a></td>\n <td align=\"center\"><a href=\"https://github.com/robertoostenveld\"><img src=\"https://avatars.githubusercontent.com/u/899043?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Robert Oostenveld</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=robertoostenveld\" title=\"Code\">💻</a> <a 
href=\"#ideas-robertoostenveld\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Arobertoostenveld\" title=\"Bug reports\">🐛</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=robertoostenveld\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"https://github.com/SetCodesToFire\"><img src=\"https://avatars.githubusercontent.com/u/25459509?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Rohan Goyal</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=SetCodesToFire\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/rwblair\"><img src=\"https://avatars2.githubusercontent.com/u/14927911?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Ross Blair</b></sub></a><br /><a href=\"#maintenance-rwblair\" title=\"Maintenance\">🚧</a> <a href=\"#ideas-rwblair\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=rwblair\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Arwblair\" title=\"Bug reports\">🐛</a> <a href=\"#infra-rwblair\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a> <a href=\"#projectManagement-rwblair\" title=\"Project Management\">📆</a> <a href=\"#question-rwblair\" title=\"Answering Questions\">💬</a> <a href=\"https://github.com/bids-standard/bids-validator/pulls?q=is%3Apr+reviewed-by%3Arwblair\" title=\"Reviewed Pull Requests\">👀</a> <a href=\"#tool-rwblair\" title=\"Tools\">🔧</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=rwblair\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a href=\"http://www.poldracklab.org/\"><img src=\"https://avatars.githubusercontent.com/u/871056?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Russ Poldrack</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=poldrack\" title=\"Code\">💻</a> <a href=\"#financial-poldrack\" title=\"Financial\">💵</a> <a href=\"#fundingFinding-poldrack\" title=\"Funding Finding\">🔍</a></td>\n <td align=\"center\"><a href=\"http://soichi.us/\"><img src=\"https://avatars.githubusercontent.com/u/923896?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Soichi Hayashi</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Asoichih\" title=\"Bug reports\">🐛</a></td>\n <td align=\"center\"><a href=\"https://www.stefanappelhoff.com\"><img src=\"https://avatars.githubusercontent.com/u/9084751?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Stefan Appelhoff</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Asappelhoff\" title=\"Bug reports\">🐛</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=sappelhoff\" title=\"Code\">💻</a> <a href=\"#data-sappelhoff\" title=\"Data\">🔣</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=sappelhoff\" title=\"Documentation\">📖</a> <a href=\"#example-sappelhoff\" title=\"Examples\">💡</a> <a href=\"#ideas-sappelhoff\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"#infra-sappelhoff\" title=\"Infrastructure (Hosting, Build-Tools, etc)\">🚇</a> <a href=\"#maintenance-sappelhoff\" title=\"Maintenance\">🚧</a> <a href=\"#mentoring-sappelhoff\" title=\"Mentoring\">🧑‍🏫</a> <a href=\"#question-sappelhoff\" title=\"Answering Questions\">💬</a> <a 
href=\"https://github.com/bids-standard/bids-validator/pulls?q=is%3Apr+reviewed-by%3Asappelhoff\" title=\"Reviewed Pull Requests\">👀</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=sappelhoff\" title=\"Tests\">⚠️</a> <a href=\"#tutorial-sappelhoff\" title=\"Tutorials\">✅</a> <a href=\"#talk-sappelhoff\" title=\"Talks\">📢</a> <a href=\"#userTesting-sappelhoff\" title=\"User Testing\">📓</a></td>\n <td align=\"center\"><a href=\"https://github.com/suyashdb\"><img src=\"https://avatars.githubusercontent.com/u/11152799?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Suyash </b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=suyashdb\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/tsalo\"><img src=\"https://avatars.githubusercontent.com/u/8228902?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Taylor Salo</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=tsalo\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/olgn\"><img src=\"https://avatars.githubusercontent.com/u/8853289?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Teal Hobson-Lowther</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=olgn\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=olgn\" title=\"Tests\">⚠️</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/riddlet\"><img src=\"https://avatars.githubusercontent.com/u/4789331?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Travis Riddle</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/issues?q=author%3Ariddlet\" title=\"Bug reports\">🐛</a></td>\n <td align=\"center\"><a href=\"https://github.com/VisLab\"><img src=\"https://avatars.githubusercontent.com/u/1189050?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>VisLab</b></sub></a><br /><a href=\"#ideas-VisLab\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=VisLab\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/wazeerzulfikar\"><img src=\"https://avatars.githubusercontent.com/u/15856554?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Wazeer Zulfikar</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=wazeerzulfikar\" title=\"Documentation\">📖</a></td>\n <td align=\"center\"><a href=\"https://github.com/yarikoptic\"><img src=\"https://avatars.githubusercontent.com/u/39889?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>Yaroslav Halchenko</b></sub></a><br /><a href=\"#ideas-yarikoptic\" title=\"Ideas, Planning, & Feedback\">🤔</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=yarikoptic\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=yarikoptic\" title=\"Documentation\">📖</a> <a href=\"#userTesting-yarikoptic\" title=\"User Testing\">📓</a></td>\n <td align=\"center\"><a href=\"https://github.com/constellates\"><img src=\"https://avatars.githubusercontent.com/u/4325905?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>constellates</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=constellates\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=constellates\" title=\"Tests\">⚠️</a></td>\n <td align=\"center\"><a 
href=\"https://github.com/dewarrn1\"><img src=\"https://avatars.githubusercontent.com/u/1322751?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>dewarrn1</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=dewarrn1\" title=\"Code\">💻</a></td>\n <td align=\"center\"><a href=\"https://github.com/dkp\"><img src=\"https://avatars.githubusercontent.com/u/965184?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>dkp</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=dkp\" title=\"Code\">💻</a></td>\n </tr>\n <tr>\n <td align=\"center\"><a href=\"https://github.com/MatthewZito\"><img src=\"https://avatars.githubusercontent.com/u/47864657?v=4?s=50\" width=\"50px;\" alt=\"\"/><br /><sub><b>goldmund</b></sub></a><br /><a href=\"https://github.com/bids-standard/bids-validator/commits?author=MatthewZito\" title=\"Code\">💻</a> <a href=\"https://github.com/bids-standard/bids-validator/commits?author=MatthewZito\" title=\"Tests\">⚠️</a></td>\n </tr>\n</table>\n\n<!-- markdownlint-restore -->\n<!-- prettier-ignore-end -->\n\n<!-- ALL-CONTRIBUTORS-LIST:END -->\n\n## Use\n\n### API\n\nThe BIDS Validator has one primary method that takes a directory as either a\npath to the directory (node) or the object given by selecting a directory with a\nfile input (browser), an options object, and a callback.\n\nAvailable options include:\n\n- ignoreWarnings - (boolean - defaults to false)\n- ignoreNiftiHeaders - (boolean - defaults to false)\n\nFor example:\n\n`validate.BIDS(directory, {ignoreWarnings: true}, function (issues, summary) {console.log(issues.errors, issues.warnings);});`\n\nIf you would like to test individual files you can use the file specific checks\nthat we expose.\n\n- validate.BIDS()\n- validate.JSON()\n- validate.TSV()\n- validate.NIFTI()\n\nAdditionally you can reformat stored errors against a new config using `validate.reformat()`\n\n### .bidsignore\n\nOptionally one can include a `.bidsignore` file in the root of the dataset. This\nfile lists patterns (compatible with the [.gitignore syntax](https://git-scm.com/docs/gitignore))\ndefining files that should be ignored by the validator. This option is useful\nwhen the validated dataset includes file types not yet supported by BIDS\nspecification.\n\n```Text\n*_not_bids.txt\nextra_data/\n```\n\n### Configuration\n\nYou can configure the severity of errors by passing a json configuration file\nwith a `-c` or `--config` flag to the command line interface or by defining a\nconfig object on the options object passed during javascript usage.\n\nIf no path is specified a default path of `.bids-validator-config.json` will be used. You can add this file to your dataset to share dataset specific validation configuration. To disable this behavior use `--no-config` and the default configuration will be used.\n\nThe basic configuration format is outlined below. 
All configuration is optional.\n\n```JSON\n{\n\t\"ignore\": [],\n\t\"warn\": [],\n\t\"error\": [],\n\t\"ignoredFiles\": []\n}\n```\n\n`ignoredFiles` takes a list of file paths or glob patterns you'd like to ignore.\nLet's say we want to ignore all files and sub-directories under `/derivatives/`.\n**This is not the same syntax as used in the .bidsignore file**\n\n```JSON\n{\n\t\"ignoredFiles\": [\"/derivatives/**\"]\n}\n```\n\nNote that adding two stars `**` to the path makes the validator ignore all files and\nsub-directories under it.\n\n`ignore`, `warn`, and `error` take lists of issue codes or issue keys and change\nthe severity of those issues so they are either ignored or reported as warnings\nor errors. You can find a list of all available issues at\n[utils/issues/list](https://github.com/bids-standard/bids-validator/blob/master/bids-validator/utils/issues/list.js).\n\nSome issues may be ignored by default, but can be elevated to warnings or errors.\nThese provide a way to check for common things that are more specific than BIDS\ncompatibility. An example is a check for the presence of a T1w modality. The\nfollowing would raise an error if no T1w image was found in a dataset.\n\n```JSON\n{\n\t\"error\": [\"NO_T1W\"]\n}\n```\n\nIn addition to issue codes and keys, these lists can also contain objects with\n\"and\" or \"or\" properties set to arrays of codes or keys. These allow some\nlevel of conditional logic when configuring issues. For example:\n\n```JSON\n{\n\t\"ignore\": [\n\t\t{\n\t\t\t\"and\": [\n\t\t\t\t\"ECHO_TIME_GREATER_THAN\",\n\t\t\t\t\"ECHO_TIME_NOT_DEFINED\"\n\t\t\t]\n\t\t}\n\t]\n}\n```\n\nIn the above example the two issues will only be ignored if both of them are\ntriggered during validation.\n\n```JSON\n{\n\t\"ignore\": [\n\t\t{\n\t\t\t\"and\": [\n\t\t\t\t\"ECHO_TIME_GREATER_THAN\",\n\t\t\t\t\"ECHO_TIME_NOT_DEFINED\",\n\t\t\t\t{\n\t\t\t\t\t\"or\": [\n\t\t\t\t\t\t\"ECHO_TIME1-2_NOT_DEFINED\",\n\t\t\t\t\t\t\"ECHO_TIME_MUST_DEFINE\"\n\t\t\t\t\t]\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t]\n}\n```\n\nAnd in this example the listed issues will only be ignored if\n`ECHO_TIME_GREATER_THAN`, `ECHO_TIME_NOT_DEFINED` and either\n`ECHO_TIME1-2_NOT_DEFINED` or `ECHO_TIME_MUST_DEFINE` are triggered during\nvalidation.\n\n\"or\" arrays are not supported at the lowest level because they wouldn't add any\nfunctionality. For example, the following is not supported.\n\n```JSON\n{\n\t\"ignore\": [\n\t\t{\n\t\t\t\"or\": [\n\t\t\t\t\"ECHO_TIME_GREATER_THAN\",\n\t\t\t\t\"ECHO_TIME_NOT_DEFINED\"\n\t\t\t]\n\t\t}\n\t]\n}\n```\n\nbecause it would be functionally the same as this:\n\n```JSON\n{\n\t\"ignore\": [\n\t\t\"ECHO_TIME_GREATER_THAN\",\n\t\t\"ECHO_TIME_NOT_DEFINED\"\n\t]\n}\n```\n\nFor passing a configuration while using the bids-validator on the command line,\nyou can use the following style to, for example, ignore empty\nfile errors (99) and files that cannot be read (44):\n\n```\nbids-validator --config.ignore=99 --config.ignore=44 path/to/bids/dir\n```\n\nThis style of use puts limits on what configuration you can require, so for\ncomplex scenarios, we advise users to create a dedicated configuration file with\ncontents as described above.\n\n### In the Browser\n\nThe BIDS Validator currently works in the browser with [browserify](https://browserify.org/)\nor [webpack](https://webpack.js.org/). 
You can add it to a project by cloning\nthe validator and requiring it with browserify syntax\n`const validate = require('bids-validator');` or an ES2015 webpack import\n`import validate from 'bids-validator'`.\n\n### On the Server\n\nThe BIDS validator works like most npm packages. You can install it by running\n`npm install bids-validator`.\n\n### Through Command Line\n\nIf you install the bids validator globally by using `npm install -g bids-validator`\nyou will be able to use it as a command line tool. Once installed you should be\nable to run `bids-validator /path/to/your/bids/directory` and see any validation\nissues logged to the terminal. Run `bids-validator` without a directory path to\nsee available options.\n\n## Docker image\n\n[![Docker Image Version (latest by date)](https://img.shields.io/docker/v/bids/validator?label=docker)](https://hub.docker.com/r/bids/validator)\n\nTo use bids validator with [docker](https://www.docker.com/), you simply need to\n[install docker](https://docs.docker.com/install/) on your system.\n\nThen, from a terminal, run:\n\n- `docker run -ti --rm bids/validator --version` to print the version of the\n docker image\n- `docker run -ti --rm bids/validator --help` to print the help\n- `docker run -ti --rm -v /path/to/data:/data:ro bids/validator /data`\n to validate the dataset `/path/to/data` on your host machine\n\nSee here for a brief explanation of the commands:\n\n- `docker run` is the command to tell docker to run a certain docker image,\n usually taking the form `docker run <IMAGENAME> <COMMAND>`\n- the `-ti` flag means the inputs are accepted and outputs are printed to the\n terminal\n- the `--rm` flag means that the state of the docker container is not saved\n after it has run\n- the `-v` flag is adding your local data to the docker container\n ([bind-mounts](https://docs.docker.com/storage/bind-mounts/)). Importantly,\n the input after the `-v` flag consists of three fields separated by colons: `:`\n - the first field is the path to the directory on the host machine:\n `/path/to/data`\n - the second field is the path where the directory is mounted in the\n container\n - the third field is optional. In our case, we use `ro` to specify that the\n mounted data is _read only_\n\n## Python Library\n\n[![PyPI version](https://badge.fury.io/py/bids-validator.svg)](https://badge.fury.io/py/bids-validator)\n[![Conda version](https://img.shields.io/conda/vn/conda-forge/bids-validator)](https://anaconda.org/conda-forge/bids-validator)\n\nThere is a limited library of helper functions written in Python. The main function\ndetermines if a file extension is compliant with the BIDS specification. You can find\nthe available functions in the library, as well as their descriptions,\n[here](https://github.com/bids-standard/bids-validator/blob/master/bids-validator/bids_validator/bids_validator.py).\nTo install, run `pip install -U bids_validator` (requires Python and pip) or\n`conda install bids-validator` (requires a Conda environment).\n\n### Example\n\n```Python\nfrom bids_validator import BIDSValidator\nvalidator = BIDSValidator()\nfilepaths = [\"/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz\", \"/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe\"]\nfor filepath in filepaths:\n print(validator.is_bids(filepath)) # will print True, and then False\n```\n\n## Development\n\nTo develop locally, clone the project and run `npm install` from the project\nroot. This will install external dependencies. 
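\n\nFor example, a minimal first-time setup might look like the following sketch (it assumes you have already forked the repository to a GitHub account called `USER`, the same placeholder used in CONTRIBUTING.md):\n\n```\ngit clone https://github.com/USER/bids-validator.git\ncd bids-validator\nnpm install\n```\n\n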
If you wish to install\n`bids-validator` globally (so that you can run it in other folders), use the\nfollowing command: `cd bids-validator && npm install -g` (for Windows users: if the repository is on a different drive, add `/d`, e.g. `cd /d F:\\bids-validator && npm install -g`)\n\nPlease see the [CONTRIBUTING.md](../CONTRIBUTING.md)\nfor additional details.\n\n### Bundling\n\nbids-validator is bundled with esbuild. While developing, the script `bids-validator/bin/bids-validator` will automatically bundle the project each time it is run. To test a build without publishing it, run `npm -w bids-validator run build`. This will generate a `bids-validator/dist` directory containing the local build, and `bids-validator/bin/bids-validator` will use this build. To return to automatic bundling on each run, remove the `dist` directory.\n\n### Running Locally in a Browser\n\nA note about OS X: the dependencies for the browser require an npm package called\nnode-gyp, which needs Xcode to be installed in order to be compiled.\n\n1. The browser version of `bids-validator` lives in the repo subdirectory\n `/bids-validator-web`. It is a [React.js](https://reactjs.org/) application\n that uses the [next.js](https://nextjs.org/) framework.\n2. To develop `bids-validator` and see how it will act in the browser, simply run\n `npm run web-dev` in the project root and navigate to `localhost:3000`.\n3. In development mode, changes to the codebase will trigger rebuilds of the application\n automatically.\n4. Changes to `/bids-validator` in the codebase will also be reflected in the\n web application.\n5. Tests use the [Jest](https://jestjs.io/index.html) testing library and should be developed in `/bids-validator-web/tests`.\n We can always use more tests, so please feel free to contribute a test that reduces the chance\n of any bugs you fix!\n6. To ensure that the web application compiles successfully in production, run `npm run web-export`.\n\n### Testing\n\nIf it's your first time running tests, first use the command `git submodule update --init --depth 1` to pull the test example data. This repo contains the [bids-examples GitHub repository](https://github.com/bids-standard/bids-examples) as a [submodule](https://git-scm.com/book/en/v2/Git-Tools-Submodules).\n\nTo start the test suite, run `npm run test` from the project root. `npm run test -- --watch`\nis useful to run tests while making changes. A coverage report is available with\n`npm run coverage`.\n\nTo run the linter, which checks code conventions, run `npm run lint`.\n\n### Install globally from a development branch\n\nGlobal installs are not recommended for development because of the possibility of package conflicts with other Node.js projects. If you do need to test with a global install from a development tree, follow these steps to generate the NPM package without publishing it and install the package locally.\n\n1. `npm -w bids-validator run build`\n2. `npm -w bids-validator pack`\n3. `npm install -g bids-validator-*.tgz`\n\n### Publishing\n\nPublishing is done with [Lerna](https://github.com/lerna/lerna). Use the command `npx lerna publish` and follow the instructions to set a new version.\n\nUsing `lerna publish` will create a git commit with updated version information and create a version number tag for it, push the tag to GitHub, then publish to NPM and PyPI. The GitHub release is created manually after that.\n\n## Acknowledgments\n\nMany contributions to the `bids-validator` were made by members of the\nBIDS community. 
See the\n[list of contributors](https://bids-specification.readthedocs.io/en/stable/99-appendices/01-contributors.html).\n\nA large part of the development of `bids-validator` is currently done by\n[Squishymedia](https://squishymedia.com/), who are in turn financed through\ndifferent grants offered for the general development of BIDS. See the list\nbelow.\n\nDevelopment and contributions were supported through the following federally\nfunded projects/grants:\n\n- [BIDS Derivatives (NIMH: R24MH114705, PI: Poldrack)](https://grantome.com/grant/NIH/R24-MH114705-01)\n- [OpenNeuro (NIMH: R24MH117179, PI: Poldrack)](https://grantome.com/grant/NIH/R24-MH117179-01)\n- [Spokes: MEDIUM: WEST (NSF: 1760950, PI: Poldrack & Gorgolewski)](https://grantome.com/grant/NSF/IIS-1760950)\n- [ReproNim](http://repronim.org) [(NIH-NIBIB P41 EB019936, PI: Kennedy)](https://projectreporter.nih.gov/project_info_description.cfm?aid=8999833)\n" }, { "alpha_fraction": 0.6207627058029175, "alphanum_fraction": 0.6207627058029175, "avg_line_length": 19.521739959716797, "blob_id": "6c86a89851fb77d5efb962eceb5b7bab871f2159", "content_id": "1dc3b0c975e73c31ba8727102bffc05acb2b56a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 944, "license_type": "permissive", "max_line_length": 81, "num_lines": 46, "path": "/bids-validator/src/files/ignore.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { BIDSFile } from '../types/file.ts'\nimport { ignore, Ignore } from '../deps/ignore.ts'\n\nexport async function readBidsIgnore(file: BIDSFile) {\n const value = await file.text()\n if (value) {\n const lines = value.split('\\n')\n return lines\n } else {\n return []\n }\n}\n\nconst defaultIgnores = [\n '.git**',\n '.datalad/',\n '.reproman/',\n 'sourcedata/',\n 'code/',\n 'stimuli/',\n 'log/',\n '**/meg/*.ds/**',\n '**/micr/*.zarr/**',\n]\n\n/**\n * Deno implementation of .bidsignore style rules\n */\nexport class FileIgnoreRules {\n #ignore: Ignore\n\n constructor(config: string[]) {\n this.#ignore = ignore({ allowRelativePaths: true })\n this.#ignore.add(defaultIgnores)\n this.#ignore.add(config)\n }\n\n add(config: string[]): void {\n this.#ignore.add(config)\n }\n\n /** Test if a dataset relative path should be ignored given configured rules */\n test(path: string): boolean {\n return this.#ignore.ignores(path)\n }\n}\n" }, { "alpha_fraction": 0.7514792680740356, "alphanum_fraction": 0.7536837458610535, "avg_line_length": 50.30356979370117, "blob_id": "4719636392f6ea9bf84414065e8b2a49a1007083", "content_id": "e327089823f89171ef88e49d2140699e0fabdb70", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8619, "license_type": "permissive", "max_line_length": 235, "num_lines": 168, "path": "/CONTRIBUTING.md", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "# Contributing Guide\n\nWelcome to the contributing guide!\n\nThis guide exists to help BIDS users to contribute to the BIDS validator with\ntheir own code. We cover:\n\n1. Knowledge that might be helpful to have (or acquire)\n1. How to set up your development environment for BIDS validator\n1. 
Ways to contribute code to BIDS validator (e.g., for a BIDS extension\n proposal)\n\nIf you find that something is unclear, please open an [issue](https://github.com/bids-standard/bids-validator/issues)\nso that we can improve this guide.\n\n## Knowledge that will help you along the way\n\n### Git\n\nWe use [`git`][link_git] for source control.\nIf you're not yet familiar with `git`, there are lots of great resources to help you\nget started!\nSome of our favorites include the [git Handbook][link_handbook] and\nthe [Software Carpentry introduction to git][link_swc_intro].\n\nIn particular, you will want to become conversant with the following operations:\n\n- [`git clone`](https://git-scm.com/docs/git-clone)\n- [`git branch`](https://git-scm.com/docs/git-branch)\n- [`git checkout`](https://git-scm.com/docs/git-checkout)\n- [`git status`](https://git-scm.com/docs/git-status)\n- [`git pull`](https://git-scm.com/docs/git-pull)\n- [`git add`](https://git-scm.com/docs/git-add)\n- [`git commit`](https://git-scm.com/docs/git-commit)\n- [`git push`](https://git-scm.com/docs/git-push)\n\nYou should also configure [configure git for your\nuser](https://git-scm.com/book/en/v2/Customizing-Git-Git-Configuration), so your commits\nare properly attributed.\n\n### GitHub\n\nWe use [GitHub](https://github.com) to manage contributions and have development\ndiscussions in the open.\nTo participate, be sure you know how to\n\n- [Fork the repository][link_fork]\n- [Open pull requests][link_pullrequest]\n\n### Coding knowledge\n\n- Familiarize yourself with the command line on your system (e.g., `bash`)\n- Basic knowledge about coding is helpful and familiarity with JavaScript\n is a big bonus, but you can contribute to the BIDS validator also without\n specific knowledge of JavaScript\n- Some knowledge about software testing (why we are doing it) would be nice\n\n## Using the development version of BIDS validator\n\n1. [Make a GitHub account][link_signupinstructions]\n1. Install the required software:\n - [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)\n - [NodeJS](https://nodejs.org/en/download/), including `npm` of version 7 or higher (check existing version with `npm -v`, to update an existing nodejs, we can suggest [this guide](https://phoenixnap.com/kb/update-node-js-version).)\n1. In the GitHub interface, [make a fork][link_fork] of\n https://github.com/bids-standard/bids-validator to your own user (called `USER` for the\n sake of the example)\n - you will now have your own copy of BIDS validator at https://github.com/USER/bids-validator\n1. Open a command line and navigate to the location on your computer from where\n you want to develop BIDS validator and [clone][link_clone] **your**\n fork of the repository\n - You will now have a new directory called `bids-validator`\n - navigate to that directory and run `git status` to verify that it's a `git`\n directory\n - run `npm install` to install the BIDS validator\n1. Upon inspection of the `bids-validator` repository we can find the\n \"executable\" BIDS validator, located in `<...>/bids-validator/bin`, where\n `<...>` is the path to your `bids-validator` repository\n - To make this executable available from the command line, we have to add it\n to the path. On Unix systems with bash as their default shell, this means\n editing the `.bashrc` file by adding the following line to the bottom of\n it: `export PATH=\"$PATH:<...>/bids-validator/bin\"` ... 
Note that `<...>`\n again needs to be replaced by the path to your BIDS validator repository\n - Now whenever you open a new command line, we will have the `bids-validator`\n executable available. You can verify by opening a new command line and\n typing `bids-validator --version`, and it should print the version number\n\nNow your development version of BIDS validator is set up and you can use it.\nWhenever you _checkout_ a new branch in your git repository, the\n`bids-validator` executable is now pointing to that branch, and all changes in\nthat branch will be reflected in the behavior of `bids-validator`.\n\nBefore you start making changes, there are some more important points to\nconsider:\n\n1. We need to tell **your** git directory, that it has a _remote_\n counterpart (namely, the original BIDS validator). When that counterpart\n gets updated, you have to update your BIDS validator as well, to keep in\n sync.\n - run `git remote add upstream https://github.com/bids-standard/bids-validator`\n - then run `git remote -v`, and it should show four entries: two of type\n `origin`, and two of type `upstream`\n - `origin` refers to **your** fork of BIDS validator on GitHub, whereas\n `upstream` refers to the original BIDS validator repository on GitHub\n - you can use `upstream` to always stay up to date with changes that are\n being made on the original BIDS validator. For that, simply navigate to\n the `master` branch of **your** repository using `git checkout master`,\n and then run `git pull upstream master`\n1. When you get completely stuck with your repository and you just want to\n reset it to be an exact mirror of the original BIDS validator, you can\n run the following command (Note: this will discard all current changes):\n - first checkout your master: `git checkout master`\n - then run: `git reset --hard upstream/master`\n\n## Extending the BIDS validator for a BIDS Extension Proposal (BEP)\n\n### Regular expressions\n\nA lot of validation of BIDS files and directories is happening through\n[regular expressions](https://en.wikipedia.org/wiki/Regular_expression).\nYou can see the regular expressions\n[here](https://github.com/bids-standard/bids-validator/tree/master/bids-validator/bids_validator/rules).\n\nChanging the regular expressions can be a delicate thing, so we recommend\ntesting your regular expressions exhaustively. A helpful website for that can\nbe [https://regex101.com/](https://regex101.com/), where you can test regular\nexpressions in your browser, and even save and share them.\n\n### JSON schemas\n\nAnother big chunk of BIDS validation is happening through [JSON schemas](https://json-schema.org/).\nIn BIDS, a lot of metadata is saved in JSON files, which are very well defined\nand readable by a computer. With these properties, we can make requirements of\nhow a JSON ought to look like. You can find our JSON schemas\n[here](https://github.com/bids-standard/bids-validator/tree/master/bids-validator/validators/json/schemas).\n\nAs with regular expressions, we recommend lots of testing on the JSON schemas.\nYou can easily have a first try of that using a website like\n[https://www.jsonschemavalidator.net/](https://www.jsonschemavalidator.net/).\nSimply copy over a schema from BIDS validator to the left field, and try to\ncomply to the schema, or trigger an error by typing in a JSON to the right\nfield.\n\n### Writing tests\n\nFor every change you make it is important to include a test. 
That way, we can\nmake sure that the behavior of BIDS validator is as expected, and furthermore\nwe will be notified whenever a contributor makes a change in the code that\nbreaks the expected behavior of the BIDS validator.\n\nA test usually provides some known data, and let's the software run over it ...\njust to check whether the output is as we know it should be (because we know\nthe data, after all).\n\nYou can get a good impression using the following links:\n\n- [How regular expressions are tested](https://github.com/bids-standard/bids-validator/blob/master/bids-validator/tests/type.spec.js)\n- [How JSON schemas are tested](https://github.com/bids-standard/bids-validator/blob/master/bids-validator/tests/json.spec.js)\n- [How TSV files are tested](https://github.com/bids-standard/bids-validator/blob/master/bids-validator/tests/tsv.spec.js)\n\nFor more information on how to run the tests check the [testing section](./README.md#testing) of the README.\n\n[link_git]: https://git-scm.com/\n[link_handbook]: https://guides.github.com/introduction/git-handbook/\n[link_swc_intro]: http://swcarpentry.github.io/git-novice/\n[link_signupinstructions]: https://help.github.com/articles/signing-up-for-a-new-github-account\n[link_pullrequest]: https://help.github.com/articles/creating-a-pull-request-from-a-fork\n[link_fork]: https://help.github.com/articles/fork-a-repo/\n[link_clone]: https://help.github.com/articles/cloning-a-repository\n" }, { "alpha_fraction": 0.6111111044883728, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 12.090909004211426, "blob_id": "7d4fbbfe5b3554d6edb76c94707e11d1297decaf", "content_id": "426546f4a06e5125dc76773e1ee6e485d4a9142b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 144, "license_type": "permissive", "max_line_length": 53, "num_lines": 11, "path": "/bids-validator/validators/bids/reset.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Reset\n *\n * Resets the in object data back to original values.\n */\n\nconst reset = (BIDS) => {\n BIDS.issues = []\n}\n\nexport default reset\n" }, { "alpha_fraction": 0.6384000182151794, "alphanum_fraction": 0.6431999802589417, "avg_line_length": 28.069766998291016, "blob_id": "48161f359e8d792614c012a38985fa0fd9895bb4", "content_id": "0f9831d7f0a91131f9cd8eb222b08c492e9e19bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1250, "license_type": "permissive", "max_line_length": 70, "num_lines": 43, "path": "/bids-validator/src/tests/local/valid_headers.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// Deno runtime tests for tests/data/valid_headers\nimport { assert, assertEquals } from '../../deps/asserts.ts'\nimport { validatePath, formatAssertIssue } from './common.ts'\n\nconst PATH = 'tests/data/valid_headers'\n\nDeno.test('valid_headers dataset', async (t) => {\n const { tree, result } = await validatePath(t, PATH)\n\n await t.step('correctly ignores .bidsignore files', () => {\n assert(\n result.issues.get('NOT_INCLUDED') === undefined,\n formatAssertIssue(\n 'NOT_INCLUDED should not be present',\n result.issues.get('NOT_INCLUDED'),\n ),\n )\n })\n\n await t.step('summary has correct tasks', () => {\n assertEquals(Array.from(result.summary.tasks), ['rhyme judgment'])\n })\n\n await t.step('summary has correct dataProcessed', () => {\n assertEquals(result.summary.dataProcessed, false)\n 
})\n\n await t.step('summary has correct modalities', () => {\n assertEquals(result.summary.modalities, ['MRI'])\n })\n\n await t.step('summary has correct totalFiles', () => {\n assertEquals(result.summary.totalFiles, 8)\n })\n\n await t.step('summary has correct subjectMetadata', () => {\n assertEquals(result.summary.subjectMetadata[0], {\n age: 25,\n participantId: '01',\n sex: 'M',\n })\n })\n})\n" }, { "alpha_fraction": 0.6409146189689636, "alphanum_fraction": 0.6418617367744446, "avg_line_length": 26.374074935913086, "blob_id": "a741dd2e8c1a02eea1cd869ea8567223c36bb7ef", "content_id": "aaf8efce52d82beb715eb7913b97cffd93de896e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 7391, "license_type": "permissive", "max_line_length": 82, "num_lines": 270, "path": "/bids-validator/src/validators/filenameValidate.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { CheckFunction, RuleCheckFunction } from '../types/check.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport { GenericSchema, Schema, Entity, Format } from '../types/schema.ts'\nimport { SEP } from '../deps/path.ts'\nimport { hasProp } from '../utils/objectPathHandler.ts'\n\nconst sidecarExtensions = ['.json', '.tsv', '.bvec', '.bval']\n\nconst CHECKS: CheckFunction[] = [\n missingLabel,\n atRoot,\n entityLabelCheck,\n checkRules,\n]\n\nexport async function filenameValidate(\n schema: GenericSchema,\n context: BIDSContext,\n) {\n for (const check of CHECKS) {\n await check(schema, context)\n }\n return Promise.resolve()\n}\n\nexport function isAtRoot(context: BIDSContext) {\n if (context.file.path.split(SEP).length !== 2) {\n return false\n }\n return true\n}\n\nexport async function missingLabel(\n schema: GenericSchema,\n context: BIDSContext,\n) {\n const fileNoLabelEntities = Object.keys(context.entities).filter(\n (key) => context.entities[key] === 'NOENTITY',\n )\n\n const fileEntities = Object.keys(context.entities).filter(\n (key) => !fileNoLabelEntities.includes(key),\n )\n\n if (fileNoLabelEntities.length) {\n context.issues.addNonSchemaIssue('ENTITY_WITH_NO_LABEL', [\n { ...context.file, evidence: fileNoLabelEntities.join(', ') },\n ])\n }\n return Promise.resolve()\n}\n\nexport function atRoot(schema: GenericSchema, context: BIDSContext) {\n /*\n if (fileIsAtRoot && !sidecarExtensions.includes(context.extension)) {\n // create issue for data file in root of dataset\n }\n */\n return Promise.resolve()\n}\n\nexport function lookupEntityLiteral(name: string, schema: GenericSchema) {\n if (\n schema.objects &&\n hasProp(schema.objects, 'entities') &&\n hasProp(schema.objects.entities, name)\n ) {\n const entityObj = schema.objects.entities[name]\n if (hasProp(entityObj, 'name')) {\n return entityObj.name\n }\n }\n // if this happens there is an issue with the schema?\n return ''\n}\n\nfunction getEntityByLiteral(\n fileEntity: string,\n schema: GenericSchema,\n): null | Entity {\n if (\n 'entities' in schema.objects &&\n typeof schema.objects.entities === 'object'\n ) {\n const entities = schema.objects.entities\n const key = Object.keys(entities).find((key) => {\n return (\n hasProp(entities, key) &&\n hasProp(entities[key], 'name') &&\n // @ts-expect-error\n entities[key].name === fileEntity\n )\n })\n if (key && hasProp(entities, key)) {\n return entities[key] as Entity\n }\n }\n return null\n}\n\nexport async function entityLabelCheck(\n 
schema: GenericSchema,\n context: BIDSContext,\n) {\n if (!('formats' in schema.objects) || !('entities' in schema.objects)) {\n throw new Error('schema missing keys')\n }\n const formats = schema.objects.formats as unknown as Record<string, Format>\n const entities = schema.objects.entities as unknown as Record<string, Entity>\n Object.keys(context.entities).map((fileEntity) => {\n const entity = getEntityByLiteral(fileEntity, schema)\n if (\n entity &&\n entity.format &&\n typeof entity.format === 'string' &&\n hasProp(formats, entity.format)\n ) {\n // assuming all formats are well defined in schema.objects\n const pattern = formats[entity.format].pattern\n const rePattern = new RegExp(`^${pattern}$`)\n const label = context.entities[fileEntity]\n if (!rePattern.test(label)) {\n context.issues.addNonSchemaIssue('INVALID_ENTITY_LABEL', [\n {\n ...context.file,\n evidence: `entity: ${fileEntity} label: ${label} pattern: ${pattern}`,\n },\n ])\n }\n } else {\n // unknown entity\n }\n })\n return Promise.resolve()\n}\n\nconst ruleChecks: RuleCheckFunction[] = [\n entityRuleIssue,\n datatypeMismatch,\n extensionMismatch,\n]\n\nasync function checkRules(schema: GenericSchema, context: BIDSContext) {\n if (context.filenameRules.length === 1) {\n for (const check of ruleChecks) {\n check(\n context.filenameRules[0],\n schema as unknown as GenericSchema,\n context,\n )\n }\n } else {\n const ogIssues = context.issues\n const noIssues: [string, DatasetIssues][] = []\n const someIssues: [string, DatasetIssues][] = []\n for (const path of context.filenameRules) {\n const tempIssues = new DatasetIssues()\n context.issues = tempIssues\n for (const check of ruleChecks) {\n check(path, schema as unknown as GenericSchema, context)\n }\n tempIssues.size\n ? someIssues.push([path, tempIssues])\n : noIssues.push([path, tempIssues])\n }\n if (noIssues.length) {\n context.issues = ogIssues\n context.filenameRules = [noIssues[0][0]]\n } else if (someIssues.length) {\n // What would we want to do with each rules issues? 
Add all?\n context.issues = ogIssues\n context.issues.addNonSchemaIssue('ALL_FILENAME_RULES_HAVE_ISSUES', [\n {\n ...context.file,\n evidence: `Rules that matched with issues: ${someIssues\n .map((x) => x[0])\n .join(', ')}`,\n },\n ])\n }\n }\n return Promise.resolve()\n}\n\nfunction entityRuleIssue(\n path: string,\n schema: GenericSchema,\n context: BIDSContext,\n) {\n const rule = schema[path]\n if (!('entities' in rule)) {\n if (Object.keys(context.entities).length > 0) {\n // Throw issue for entity in file but not rule\n }\n return\n }\n\n const fileEntities = Object.keys(context.entities)\n const ruleEntities = Object.keys(rule.entities).map((key) =>\n lookupEntityLiteral(key, schema),\n )\n\n // skip required entity checks if file is at root.\n // No requirements for inherited sidecars at this level.\n if (!isAtRoot(context)) {\n const ruleEntitiesRequired = Object.entries(rule.entities)\n .filter(([_, v]) => v === 'required')\n .map(([k, _]) => lookupEntityLiteral(k, schema))\n\n const missingRequired = ruleEntitiesRequired.filter(\n (required) => !fileEntities.includes(required as string),\n )\n\n if (missingRequired.length) {\n context.issues.addNonSchemaIssue('MISSING_REQUIRED_ENTITY', [\n {\n ...context.file,\n evidence: `${missingRequired.join(', ')} missing from rule ${path}`,\n },\n ])\n }\n }\n\n const entityNotInRule = fileEntities.filter(\n (fileEntity) => !ruleEntities.includes(fileEntity),\n )\n\n if (entityNotInRule.length) {\n context.issues.addNonSchemaIssue('ENTITY_NOT_IN_RULE', [\n {\n ...context.file,\n evidence: `${entityNotInRule.join(', ')} not in rule ${path}`,\n },\n ])\n }\n}\n\nfunction datatypeMismatch(\n path: string,\n schema: GenericSchema,\n context: BIDSContext,\n) {\n const rule = schema[path]\n if (\n !!context.datatype &&\n Array.isArray(rule.datatypes) &&\n !rule.datatypes.includes(context.datatype)\n ) {\n context.issues.addNonSchemaIssue('DATATYPE_MISMATCH', [\n { ...context.file, evidence: `Datatype rule being applied: ${path}` },\n ])\n }\n}\n\nasync function extensionMismatch(\n path: string,\n schema: GenericSchema,\n context: BIDSContext,\n) {\n const rule = schema[path]\n if (\n Array.isArray(rule.extensions) &&\n !rule.extensions.includes(context.extension)\n ) {\n context.issues.addNonSchemaIssue('EXTENSION_MISMATCH', [\n { ...context.file, evidence: `Rule: ${path}` },\n ])\n }\n}\n" }, { "alpha_fraction": 0.7858585715293884, "alphanum_fraction": 0.7858585715293884, "avg_line_length": 54, "blob_id": "fd128fef4eb10d0f0eb8b80bdbf66398828e08f2", "content_id": "a63f56e65faa932c1d8eba425e7493707ea8d934", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 990, "license_type": "permissive", "max_line_length": 101, "num_lines": 18, "path": "/bids-validator/utils/common.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// An index of rules documents to export as modules\n// The Python module expects these to be within its tree, but we can just import them from there\nimport associated_data_rules from '../bids_validator/bids_validator/rules/associated_data_rules.json'\n\nimport file_level_rules from '../bids_validator/bids_validator/rules/file_level_rules.json'\nimport phenotypic_rules from '../bids_validator/bids_validator/rules/phenotypic_rules.json'\nimport session_level_rules from '../bids_validator/bids_validator/rules/session_level_rules.json'\nimport subject_level_rules from 
'../bids_validator/bids_validator/rules/subject_level_rules.json'\nimport top_level_rules from '../bids_validator/bids_validator/rules/top_level_rules.json'\n\nexport default {\n associated_data_rules: associated_data_rules,\n file_level_rules: file_level_rules,\n phenotypic_rules: phenotypic_rules,\n session_level_rules: session_level_rules,\n subject_level_rules: subject_level_rules,\n top_level_rules: top_level_rules,\n}\n" }, { "alpha_fraction": 0.4588235318660736, "alphanum_fraction": 0.4647058844566345, "avg_line_length": 13.166666984558105, "blob_id": "ebcc6a037018b5a53096bcdefb95ce22eb09fc88", "content_id": "08026c3d18eea23c4ed06a2fe98d6e61e99a5f5d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 170, "license_type": "permissive", "max_line_length": 19, "num_lines": 12, "path": "/bids-validator/validators/bids/obj.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export default {\n options: {},\n issues: [],\n summary: {\n sessions: [],\n subjects: [],\n tasks: [],\n modalities: [],\n totalFiles: [],\n size: 0,\n },\n}\n" }, { "alpha_fraction": 0.6157518029212952, "alphanum_fraction": 0.6193317174911499, "avg_line_length": 27.406780242919922, "blob_id": "62f462d3f71322e9e4f26cd38d8a7c6e2e51f0d7", "content_id": "7cbddf4a1522a953b3aa7bd023273467b76b0000", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1676, "license_type": "permissive", "max_line_length": 88, "num_lines": 59, "path": "/bids-validator/validators/json/validate.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import json from './json'\nimport utils from '../../utils'\nconst Issue = utils.issues.Issue\n\nconst validate = (jsonFiles, fileList, jsonContentsDict, summary) => {\n let issues = []\n const jsonValidationPromises = jsonFiles.map(function (file) {\n return utils.limit(\n () =>\n new Promise((resolve) => {\n checkForAccompanyingDataFile(file, fileList, issues)\n json(file, jsonContentsDict, (jsonIssues, jsObj) => {\n issues = issues.concat(jsonIssues)\n collectTaskSummary(file, jsObj, summary)\n return resolve()\n })\n }),\n )\n })\n\n return new Promise((resolve) =>\n Promise.all(jsonValidationPromises).then(() => resolve(issues)),\n )\n}\n\nconst collectTaskSummary = (file, jsObj, summary) => {\n // collect task summary\n if (file.name.indexOf('task') > -1) {\n if (\n jsObj &&\n jsObj.TaskName &&\n summary.tasks.indexOf(jsObj.TaskName) === -1\n ) {\n summary.tasks.push(jsObj.TaskName)\n }\n }\n}\n\nconst checkForAccompanyingDataFile = (file, fileList, issues) => {\n // Verify that the json file has an accompanying data file\n // Need to limit checks to files in sub-*/**/ - Not all data dictionaries are sidecars\n const pathArgs = file.relativePath.split('/')\n const isSidecar =\n pathArgs[1].includes('sub-') && pathArgs.length > 3 ? 
true : false\n if (isSidecar) {\n // Check for suitable datafile accompanying this sidecar\n const dataFile = utils.bids_files.checkSidecarForDatafiles(file, fileList)\n if (!dataFile) {\n issues.push(\n new Issue({\n code: 90,\n file: file,\n }),\n )\n }\n }\n}\n\nexport default validate\n" }, { "alpha_fraction": 0.7867646813392639, "alphanum_fraction": 0.7867646813392639, "avg_line_length": 58.01886749267578, "blob_id": "318992fc97fedf8f9486927b5282f129d33a8734", "content_id": "c9c00d5a9c80ec3ed96dc63bf11184e1a37fa1ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3128, "license_type": "permissive", "max_line_length": 387, "num_lines": 53, "path": "/bids-validator/src/README.md", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "# Deno based bids-validator\n\n## Intro\n\nThis is a partial rewrite of the bids-validator JavaScript implementation designed to read the [bids-specification schema](https://github.com/bids-standard/bids-specification/tree/master/src/schema) to apply the majority of validation rules.\n\nDeno is a JavaScript and TypeScript runtime that is used to run the schema based validator. Deno is simpler than Node.js and only requires one tool to use, the Deno executable itself. To install Deno, follow these [install instructions for your platform](https://deno.land/manual/getting_started/installation).\n\nAt the root of the repository there are two directories, `bids-validator` and `bids-validator-web`. These are separate npm packages, the Deno validator lives within the bids-validator package within the `src` directory.\n\n## Usage\n\nTo use the latest validator hosted at https://deno.land/x/bids_validator, use the following command:\n\n```console\n$ deno run --allow-read --allow-env https://deno.land/x/bids_validator/bids-validator.ts path/to/dataset\n```\n\nDeno by default sandboxes applications like a web browser. `--allow-read` allows the validator to read local files, and `--allow-env` enables OS-specific features.\n\n### Development tools\n\nFrom the repository root, use `bids-validator/bids-validator-deno` to run with all permissions enabled by default:\n\n```shell\n# Run from within the /bids-validator directory\ncd bids-validator\n# Run validator:\n./bids-validator-deno path/to/dataset\n```\n\n## Schema validator test suite\n\n```shell\n# Run tests:\ndeno test --allow-env --allow-read --allow-write src/\n```\n\nThis test suite includes running expected output from bids-examples and may throw some expected failures for bids-examples datasets where either the schema or validator are misaligned with the example dataset while under development.\n\n## Refreshing latest specification\n\nIf you are validating with the latest specification instead of a specific version, the validator will hold onto a cached version. You can request the newest version by adding the `--reload` argument to obtain the newest specification definition.\n\n`deno run --reload=https://bids-specification.readthedocs.io/en/latest/schema.json src/main.ts`\n\n## Modifying and building a new schema\n\nTo modify the schema a clone of bids-standard/bids-specification will need to be made. README and schema itself live here https://github.com/bids-standard/bids-specification/tree/master/src/schema.\n\nAfter changes to the schema have been made to a local copy the dereferenced single json file used by the validator will need to be built. The `bidsschematools` python package does this. 
It can be installed from pypi via pip or a local installation can be made. It lives in the specification repository here https://github.com/bids-standard/bids-specification/tree/master/tools/schemacode\n\nThe command to compile a dereferenced schema is `bst -v export --output src/schema.json` (this assumes you are in the root of the bids-specification repo). Once compiled it can be passed to the validator via the `-s` flag, `./bids-validator-deno -s <path to schema> <path to dataset>`\n" }, { "alpha_fraction": 0.4855676591396332, "alphanum_fraction": 0.49005773663520813, "avg_line_length": 27.345455169677734, "blob_id": "3e0ce3d90547fb34b888e7b2c277deae7b13f8b7", "content_id": "b02de023d3fc9197db81b6a1cc88aa6626636e0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1559, "license_type": "permissive", "max_line_length": 76, "num_lines": 55, "path": "/bids-validator-web/components/results/Issue.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// dependencies -------------------------------------------------------\n\nimport React from 'react'\nimport PropTypes from 'prop-types'\n\n// component setup ----------------------------------------------------\n\nexport default class Issue extends React.Component {\n // life cycle events --------------------------------------------------\n\n render() {\n let error = this.props.error\n\n // build error location string\n let errLocation = ''\n if (error.line) {\n errLocation += 'Line: ' + error.line + ' '\n }\n if (error.character) {\n errLocation += 'Character: ' + error.character + ''\n }\n if (errLocation === '' && error.evidence) {\n errLocation = 'Evidence: '\n }\n\n return (\n <div className=\"em-body issue\">\n <h4 className=\"em-header clearfix mt-4\">\n <strong className=\"em-header pull-left\">{error.file.name}</strong>\n <strong className=\"em-header pull-right\">\n {error.file.size / 1000} KB | {error.file.type}\n </strong>\n </h4>\n <span className=\"e-meta\">\n <label>Location: </label>\n <p>{error.file.webkitRelativePath}</p>\n <label>Reason: </label>\n <p>{error.reason}</p>\n </span>\n <span className=\"e-meta\">\n <label>{errLocation}</label>\n <p>{error.evidence}</p>\n </span>\n </div>\n )\n }\n\n // custom methods -----------------------------------------------------\n}\n\nIssue.propTypes = {\n file: PropTypes.object,\n error: PropTypes.object,\n type: PropTypes.string.isRequired,\n}\n" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.5877777934074402, "avg_line_length": 27.125, "blob_id": "b3f772b7bf0967750c7f2c07851af44abc3e5438", "content_id": "e4df5d5a530442852ab4a404471d5c8f89be1b37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 900, "license_type": "permissive", "max_line_length": 79, "num_lines": 32, "path": "/bids-validator/validators/tsv/checkAcqTimeFormat.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\nimport { isValid as dateIsValid, parseISO } from 'date-fns'\n\nconst checkAcqTimeFormat = function (rows, file, issues) {\n const rfc3339ish = /^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}(.\\d+)?Z?$/\n const header = rows[0]\n const acqTimeColumn = header.indexOf('acq_time')\n const testRows = rows.slice(1)\n testRows.map((line, i) => {\n const lineValues = line\n const acqTime = lineValues[acqTimeColumn]\n let isValid = 
dateIsValid(parseISO(acqTime)) && rfc3339ish.test(acqTime)\n\n if (acqTime === 'n/a') {\n isValid = true\n }\n\n if (acqTime && !isValid) {\n issues.push(\n new Issue({\n file: file,\n evidence: acqTime,\n line: i + 2,\n reason: 'acq_time is not in the format yyyy-MM-ddTHH:mm:ss[.000000]',\n code: 84,\n }),\n )\n }\n })\n}\n\nexport default checkAcqTimeFormat\n" }, { "alpha_fraction": 0.55252605676651, "alphanum_fraction": 0.5562683939933777, "avg_line_length": 23.940000534057617, "blob_id": "4e77fa58f4b5df19e397cd3ec2a5dcba8b6f4d42", "content_id": "2b9f8f1845a1c892dc5c0f83b84b56c8ef1ab05f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3741, "license_type": "permissive", "max_line_length": 60, "num_lines": 150, "path": "/bids-validator/utils/config.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import minimatch from 'minimatch'\n\nvar config = {\n /**\n * Ignored File\n */\n ignoredFile: function (conf, filePath) {\n if (conf.ignoredFiles) {\n for (var i = 0; i < conf.ignoredFiles.length; i++) {\n var ignoredPattern = conf.ignoredFiles[i]\n if (minimatch(filePath, ignoredPattern)) {\n return true\n }\n }\n }\n return false\n },\n\n /**\n * Interpret Config\n *\n * Takes a list of triggered codes and a config object\n * and create a map of modified severities\n */\n interpret: function (codes, conf) {\n var severityMap = {}\n\n if (conf.ignore && conf.ignore.length > 0) {\n var ignoreCodes = this.match(codes, conf.ignore)\n for (var i = 0; i < ignoreCodes.length; i++) {\n var ignoreCode = ignoreCodes[i]\n severityMap[ignoreCode] = 'ignore'\n }\n }\n\n if (conf.warn && conf.warn.length > 0) {\n var warnCodes = this.match(codes, conf.warn)\n for (var j = 0; j < warnCodes.length; j++) {\n var warnCode = warnCodes[j]\n severityMap[warnCode] = 'warning'\n }\n }\n\n if (conf.error && conf.error.length > 0) {\n var errorCodes = this.match(codes, conf.error)\n for (var k = 0; k < errorCodes.length; k++) {\n var errorCode = errorCodes[k]\n severityMap[errorCode] = 'error'\n }\n }\n\n return severityMap\n },\n\n /**\n * Match\n *\n * Takes a list of triggered codes and a config\n * object and returns the matched codes.\n */\n match: function (codes, conf) {\n var matches = []\n for (var i = 0; i < conf.length; i++) {\n var confCode = conf[i]\n if (codes.indexOf(confCode) > -1) {\n matches.push(confCode)\n } else if (\n confCode.hasOwnProperty('and') &&\n this.andFulfilled(codes, confCode.and)\n ) {\n // 'and' array fulfilled\n matches = matches.concat(this.flatten(confCode.and))\n }\n }\n return matches\n },\n\n /**\n * Flatten\n *\n * Takes an array that may contain objects with\n * 'and' or 'or' properties and flattens it.\n */\n flatten: function (list) {\n var codes = []\n for (var i = 0; i < list.length; i++) {\n var code = list[i]\n if (code.hasOwnProperty('and')) {\n codes = codes.concat(this.flatten(code.and))\n } else if (code.hasOwnProperty('or')) {\n codes = codes.concat(this.flatten(code.or))\n } else {\n codes.push(code)\n }\n }\n return codes\n },\n\n /**\n * And Fulfilled\n *\n * Takes an array of triggered code and an 'and'\n * array, recursively checks if it's fulfilled\n * and returns true if it is.\n */\n andFulfilled: function (codes, and) {\n for (var i = 0; i < and.length; i++) {\n var andCode = and[i]\n if (andCode.hasOwnProperty('and')) {\n if (!this.andFulfilled(codes, andCode.and)) {\n return false\n }\n } else if (andCode.hasOwnProperty('or')) {\n if 
(!this.orFulfilled(codes, andCode.or)) {\n return false\n }\n } else if (codes.indexOf(andCode) < 0) {\n return false\n }\n }\n return true\n },\n\n /**\n * Or Fulfilled\n *\n * Takes an array of triggered code and an 'or'\n * array, recursively checks if it's fulfilled\n * and returns true if it is.\n */\n orFulfilled: function (codes, or) {\n for (var i = 0; i < or.length; i++) {\n var orCode = or[i]\n if (orCode.hasOwnProperty('and')) {\n if (this.andFulfilled(codes, orCode.and)) {\n return true\n }\n } else if (orCode.hasOwnProperty('or')) {\n if (this.orFulfilled(codes, orCode.or)) {\n return true\n }\n } else if (codes.indexOf(orCode) > -1) {\n return true\n }\n }\n return false\n },\n}\n\nexport default config\n" }, { "alpha_fraction": 0.7006920576095581, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 26.5238094329834, "blob_id": "8547de9aa5c412666bf467f319d3f9340c06cd1b", "content_id": "fc9eb62a65fea3b1102d80471c9313d88f701b35", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 578, "license_type": "permissive", "max_line_length": 72, "num_lines": 21, "path": "/bids-validator/src/types/file.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Abstract validation File for all environments (Deno, Browser, Python)\n */\n\n// Avoid overloading the default File type\nexport interface BIDSFile {\n // Filename\n name: string\n // Dataset relative path for the file\n path: string\n // File size in bytes\n size: number\n // BIDS ignore status of the file\n ignored: boolean\n // ReadableStream to file raw contents\n stream: ReadableStream<Uint8Array>\n // Resolve stream to decoded utf-8 text\n text: () => Promise<string>\n // Read a range of bytes\n readBytes: (size: number, offset?: number) => Promise<Uint8Array>\n}\n" }, { "alpha_fraction": 0.6938775777816772, "alphanum_fraction": 0.7278911471366882, "avg_line_length": 20, "blob_id": "4c9861b3e0af555e643013b5539cf5fa57e4e4e2", "content_id": "1854bbd4b8f4555fd782137f2c19960718e378b2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 147, "license_type": "permissive", "max_line_length": 57, "num_lines": 7, "path": "/bids-validator/src/deps/asserts.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export {\n assert,\n assertEquals,\n assertObjectMatch,\n assertExists,\n assertRejects,\n} from 'https://deno.land/[email protected]/testing/asserts.ts'\n" }, { "alpha_fraction": 0.6507458686828613, "alphanum_fraction": 0.6523236036300659, "avg_line_length": 26.234375, "blob_id": "2e4313a7d58a62fba9c8c43300c1a9af9da135b3", "content_id": "e7d8ead00ab5b73d8ddd7c42728d48ffc6bf9788", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6972, "license_type": "permissive", "max_line_length": 104, "num_lines": 256, "path": "/bids-validator/validators/session.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../utils'\nconst sesUtils = utils.files.sessions\nvar Issue = utils.issues.Issue\nimport isNode from '../utils/isNode'\n\n/**\n * session\n *\n * Takes a list of files and creates a set of file names that occur in subject\n * directories. 
Then generates a warning if a given subject is missing any\n * files from the set.\n */\nconst session = function missingSessionFiles(fileList) {\n const issues = []\n const { subjects, sessions } = getDataOrganization(fileList)\n\n issues.push(...missingSessionWarnings(subjects, sessions))\n\n const subject_files = getSubjectFiles(subjects)\n issues.push(...missingFileWarnings(subjects, subject_files))\n\n return issues\n}\n\n/**\n * getDataOrganization\n *\n * takes a list of files and returns a dictionary of subjects and a list of sessions\n */\nfunction getDataOrganization(fileList) {\n const subjects = {}\n const sessions = []\n\n for (let key in fileList) {\n if (fileList.hasOwnProperty(key)) {\n const file = fileList[key]\n\n if (!file || (!isNode && !file.webkitRelativePath)) continue\n\n const path = file.relativePath\n if (!utils.type.isBIDS(path) || utils.type.file.isStimuliData(path))\n continue\n\n //match the subject identifier up to the '/' in the full path to a file.\n let subjKey\n const match = path.match(/sub-(.*?)(?=\\/)/)\n if (match === null) continue\n else subjKey = match[0]\n\n // suppress inconsistent subject warnings for sub-emptyroom scans\n // in MEG data\n if (subjKey == 'sub-emptyroom') continue\n\n // initialize a subject object if we haven't seen this subject before\n subjects[subjKey] = subjects[subjKey] || new sesUtils.Subject()\n\n let filename = getFilename(path, subjKey)\n subjects[subjKey].files.push(filename)\n\n const sessionMatch = filename.match(sesUtils.sessionMatcher)\n if (sessionMatch) {\n // extract session name\n const sessionName = sessionMatch[1]\n // add session to sessions if not already there\n if (!sessions.includes(sessionName)) {\n sessions.push(sessionName)\n }\n if (!subjects[subjKey].sessions.includes(sessionName))\n subjects[subjKey].sessions.push(sessionName)\n }\n }\n }\n\n return { subjects, sessions }\n}\n\n/**\n * getFilename\n *\n * takes a filepath and a subject key and\n * returns file name\n */\nfunction getFilename(path, subjKey) {\n // files are prepended with subject name, the following two commands\n // remove the subject from the file name to allow filenames to be more\n // easily compared\n let filename = path.substring(path.match(subjKey).index + subjKey.length)\n filename = filename.replace(subjKey, '<sub>')\n return filename\n}\n\n/**\n * missingSessionWarnings\n *\n * take subjects and sessions\n * pushes missing session warnings to issues list\n * and returns issues\n */\nfunction missingSessionWarnings(subjects, sessions) {\n const issues = []\n for (let subjKey in subjects) {\n if (subjects.hasOwnProperty(subjKey)) {\n const subject = subjects[subjKey]\n\n // push warning to issues if missing session\n if (sessions.length > 0) {\n sessions.forEach((commonSession) => {\n if (!subject.sessions.includes(commonSession)) {\n subject.missingSessions.push(commonSession)\n const path = `/${subjKey}/${commonSession}`\n issues.push(\n new Issue({\n file: {\n relativePath: path,\n webkitRelativePath: path,\n name: commonSession,\n path,\n },\n reason:\n 'A session is missing from one subject that is present in at least one other subject',\n evidence: `Subject: ${subjKey}; Missing session: ${commonSession}`,\n code: 97,\n }),\n )\n }\n })\n }\n }\n }\n return issues\n}\n\n/**\n * getSubjectFiles\n *\n * takes a list of subjects and returns a list of each file\n */\n\nfunction getSubjectFiles(subjects) {\n const subject_files = []\n for (let subjKey in subjects) {\n if (subjects.hasOwnProperty(subjKey)) {\n const subject 
= subjects[subjKey]\n\n // add files to subject_files if not already listed\n subject.files.forEach((file) => {\n if (subject_files.indexOf(file) < 0) {\n subject_files.push(file)\n }\n })\n }\n }\n return subject_files\n}\n\n/**\n * missingFileWarnings\n *\n * takes a list of subjects and a list of common files and\n * generates an issue for each file missing from each subject\n * returns list of issues\n */\nfunction missingFileWarnings(subjects, subject_files) {\n const issues = []\n var subjectKeys = Object.keys(subjects).sort()\n subjectKeys.forEach((subjKey) => {\n subject_files.forEach((filename) => {\n const fileInMissingSession = checkFileInMissingSession(\n filename,\n subjects[subjKey],\n )\n\n if (!fileInMissingSession) {\n const missingFileWarning = checkMissingFile(\n subjects[subjKey],\n subjKey,\n filename,\n )\n if (missingFileWarning) issues.push(missingFileWarning)\n }\n })\n })\n return issues\n}\n\n/**\n * checkFileInMissingSession\n *\n * takes a file(path) and the subject object it should belong to and\n * returns whether or not the file is in a missing session\n */\nfunction checkFileInMissingSession(filePath, subject) {\n let fileSession\n const sessionMatch = filePath.match(sesUtils.sessionMatcher)\n\n // if sessions are in use, extract session name from file\n // and test if\n if (sessionMatch) {\n fileSession = sessionMatch[1]\n return subject.missingSessions.includes(fileSession)\n } else {\n return false\n }\n}\n\n/**\n * checkMissingFile\n *\n * takes a list of subjects, the subject key, and the expected file and\n * returns an issue if the file is missing\n */\nfunction checkMissingFile(subject, subjKey, filename) {\n const subjectMissingFile = subject.files.indexOf(filename) === -1\n\n if (subjectMissingFile) {\n var fileThatsMissing = '/' + subjKey + filename.replace('<sub>', subjKey)\n const fileName = fileThatsMissing.substr(\n fileThatsMissing.lastIndexOf('/') + 1,\n )\n return new Issue({\n file: {\n relativePath: fileThatsMissing,\n webkitRelativePath: fileThatsMissing,\n name: fileName,\n path: fileThatsMissing,\n },\n evidence: `Subject: ${subjKey}; Missing file: ${fileName}`,\n reason:\n 'This file is missing for subject ' +\n subjKey +\n ', but is present for at least one other subject.',\n code: 38,\n })\n }\n}\n\nexport {\n session,\n getDataOrganization,\n getFilename,\n missingSessionWarnings,\n getSubjectFiles,\n missingFileWarnings,\n checkFileInMissingSession,\n checkMissingFile,\n}\nexport default {\n session,\n getDataOrganization,\n getFilename,\n missingSessionWarnings,\n getSubjectFiles,\n missingFileWarnings,\n checkFileInMissingSession,\n checkMissingFile,\n}\n" }, { "alpha_fraction": 0.6531986594200134, "alphanum_fraction": 0.6531986594200134, "avg_line_length": 30.263158798217773, "blob_id": "8bf5b2e0b9c1e8952590747f6bfb0c453377d38b", "content_id": "a44fbeff580180bf1a2862501acc0e064606c1a3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 594, "license_type": "permissive", "max_line_length": 61, "num_lines": 19, "path": "/bids-validator/src/tests/local/valid_filenames.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// Deno runtime tests for tests/data/valid_filenames\nimport { assert, assertEquals } from '../../deps/asserts.ts'\nimport { validatePath, formatAssertIssue } from './common.ts'\n\nconst PATH = 'tests/data/valid_filenames'\n\nDeno.test('valid_filenames dataset', async (t) => {\n const { tree, result } = await 
validatePath(t, PATH)\n\n await t.step('correctly ignores .bidsignore files', () => {\n assert(\n result.issues.get('NOT_INCLUDED') === undefined,\n formatAssertIssue(\n 'NOT_INCLUDED should not be present',\n result.issues.get('NOT_INCLUDED'),\n ),\n )\n })\n})\n" }, { "alpha_fraction": 0.49209266901016235, "alphanum_fraction": 0.5229864120483398, "avg_line_length": 26.46464729309082, "blob_id": "58f03cfd6b3a399aea24d892a9de759e90b658c2", "content_id": "295a3b4bc6839aafd114dad03dc74a72bccf4a64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5438, "license_type": "permissive", "max_line_length": 82, "num_lines": 198, "path": "/bids-validator/tests/hed.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport validateHed from '../validators/hed'\n\ndescribe('HED', function () {\n const jsonFiles = [\n {\n relativePath: '/sub01/sub01_task-test_events.json',\n path: '/sub01/sub01_task-test_events.json',\n },\n {\n relativePath: '/dataset_description.json',\n path: '/dataset_description.json',\n },\n ]\n\n it('should not throw an issue if the HED data is valid', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + '7\\tsomething\\tone\\tSpeed/30 mph\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n myCodes: {\n HED: {\n one: 'Duration/5 s',\n },\n },\n },\n '/dataset_description.json': { HEDVersion: '8.0.0' },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.deepStrictEqual(issues, [])\n })\n })\n\n it('should not throw an issue if a value column is annotated', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + '7\\t3.0\\tone\\tSpeed/30 mph\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n myCodes: {\n test: {\n HED: {\n one: 'Label/#',\n },\n },\n },\n },\n '/dataset_description.json': { HEDVersion: '8.0.0' },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.deepStrictEqual(issues, [])\n })\n })\n\n it('should not throw an issue if a library schema is included', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + '7\\t3.0\\tone\\tSpeed/30 mph\\n',\n },\n ]\n\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n myCodes: {\n test: {\n HED: {\n one: 'ts:Sensory-presentation, Label/#',\n },\n },\n },\n },\n '/dataset_description.json': {\n HEDVersion: ['8.0.0', 'ts:testlib_1.0.2'],\n },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.deepStrictEqual(issues, [])\n })\n })\n\n it('should throw an issue if the HED data is invalid', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + 
'7\\tsomething\\tone\\tDuration/5 s\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n test: {\n HED: {\n one: 'Speed/5 ms',\n },\n },\n },\n '/dataset_description.json': { HEDVersion: '8.0.0' },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.strictEqual(issues.length, 1)\n assert.strictEqual(issues[0].code, 104)\n })\n })\n\n it('should not throw an issue if multiple library schemas are included', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + '7\\t3.0\\tone\\tSpeed/30 mph\\n',\n },\n ]\n\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n myCodes: {\n test: {\n HED: {\n one: 'ts:Sensory-presentation, Label/#, sc:Sleep-deprivation',\n },\n },\n },\n },\n '/dataset_description.json': {\n HEDVersion: ['8.0.0', 'ts:testlib_1.0.2', 'sc:score_1.0.0'],\n },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.deepStrictEqual(issues, [])\n })\n })\n\n it('should properly issue warnings when appropriate', () => {\n const events = [\n {\n file: {\n path: '/sub01/sub01_task-test_events.tsv',\n relativePath: '/sub01/sub01_task-test_events.tsv',\n },\n path: '/sub01/sub01_task-test_events.tsv',\n contents:\n 'onset\\tduration\\ttest\\tHED\\n' + '7\\tsomething\\tone\\tHuman/Driver\\n',\n },\n ]\n const jsonDictionary = {\n '/sub01/sub01_task-test_events.json': {\n test: {\n HED: {\n one: 'Train/Maglev',\n },\n },\n },\n '/dataset_description.json': { HEDVersion: '8.0.0' },\n }\n\n return validateHed(events, jsonDictionary, jsonFiles, '').then((issues) => {\n assert.strictEqual(issues.length, 2)\n assert.strictEqual(issues[0].code, 105)\n assert.strictEqual(issues[1].code, 105)\n })\n })\n})\n" }, { "alpha_fraction": 0.6859592795372009, "alphanum_fraction": 0.6891747117042542, "avg_line_length": 29.09677505493164, "blob_id": "8dcf000869d3c501cf6031bce886c0d83bb06914", "content_id": "ea0d0963c89819a561d5ef38aa7030908fb91364", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1866, "license_type": "permissive", "max_line_length": 85, "num_lines": 62, "path": "/bids-validator/validators/bids/subjects.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\n\nconst participantsInSubjects = (participants, subjects) => {\n const issues = []\n if (participants) {\n const participantsFromFile = participants.list.sort()\n const participantsFromFolders = subjects.sort()\n if (\n !utils.array.equals(participantsFromFolders, participantsFromFile, true)\n ) {\n const evidence = constructMismatchEvidence(\n participantsFromFile,\n participantsFromFolders,\n )\n issues.push(\n new Issue({\n code: 49,\n evidence: evidence,\n file: participants.file,\n }),\n )\n }\n }\n return issues\n}\n\nconst constructMismatchEvidence = (participants, subjects) => {\n const diffs = utils.array.diff(participants, subjects)\n const subjectsNotInSubjectsArray = diffs[0]\n const subjectsNotInParticipantsArray = diffs[1]\n const evidenceOfMissingParticipants = subjectsNotInParticipantsArray.length\n ? 'Subjects ' +\n subjectsNotInParticipantsArray.join(', ') +\n ' were found in the folder structure but are missing in participants.tsv. 
'\n : ''\n const evidenceOfMissingSubjects = subjectsNotInSubjectsArray.length\n ? 'Subjects ' +\n subjectsNotInSubjectsArray.join(', ') +\n ' were found in participants.tsv but are not present in the folder structure. '\n : ''\n const evidence = evidenceOfMissingParticipants + evidenceOfMissingSubjects\n return evidence\n}\n\nconst atLeastOneSubject = (fileList) => {\n const issues = []\n const fileKeys = Object.keys(fileList)\n const hasSubjectDir = fileKeys.some((key) => {\n const file = fileList[key]\n return file.relativePath && file.relativePath.startsWith('/sub-')\n })\n if (!hasSubjectDir) {\n issues.push(new Issue({ code: 45 }))\n }\n return issues\n}\n\nexport default {\n participantsInSubjects,\n atLeastOneSubject,\n}\n" }, { "alpha_fraction": 0.575615644454956, "alphanum_fraction": 0.5799399614334106, "avg_line_length": 27.657487869262695, "blob_id": "83d0b572d75c9f691605b608e4f4b220f9843283", "content_id": "a70906317f800473e93a384f95107632eaa50e96", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 16650, "license_type": "permissive", "max_line_length": 95, "num_lines": 581, "path": "/bids-validator/src/deps/ignore.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// @ts-nocheck This is an NPM module we depend on forked for Deno\n// See https://github.com/kaelzhang/node-ignore/blob/master/index.js and following license text\n/**\n Copyright (c) 2013 Kael Zhang <[email protected]>, contributors\n http://kael.me/\n\n Permission is hereby granted, free of charge, to any person obtaining\n a copy of this software and associated documentation files (the\n \"Software\"), to deal in the Software without restriction, including\n without limitation the rights to use, copy, modify, merge, publish,\n distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so, subject to\n the following conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\n LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n */\n// A simple implementation of make-array\nfunction makeArray(subject) {\n return Array.isArray(subject) ? 
subject : [subject]\n}\n\nconst EMPTY = ''\nconst SPACE = ' '\nconst ESCAPE = '\\\\'\nconst REGEX_TEST_BLANK_LINE = /^\\s+$/\nconst REGEX_INVALID_TRAILING_BACKSLASH = /(?:[^\\\\]|^)\\\\$/\nconst REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION = /^\\\\!/\nconst REGEX_REPLACE_LEADING_EXCAPED_HASH = /^\\\\#/\nconst REGEX_SPLITALL_CRLF = /\\r?\\n/g\n// /foo,\n// ./foo,\n// ../foo,\n// .\n// ..\nconst REGEX_TEST_INVALID_PATH = /^\\.*\\/|^\\.+$/\n\nconst SLASH = '/'\n\n// Do not use ternary expression here, since \"istanbul ignore next\" is buggy\nlet TMP_KEY_IGNORE = 'node-ignore'\n/* istanbul ignore else */\nif (typeof Symbol !== 'undefined') {\n TMP_KEY_IGNORE = Symbol.for('node-ignore')\n}\nconst KEY_IGNORE = TMP_KEY_IGNORE\n\nconst define = (object, key, value) =>\n Object.defineProperty(object, key, { value })\n\nconst REGEX_REGEXP_RANGE = /([0-z])-([0-z])/g\n\nconst RETURN_FALSE = () => false\n\n// Sanitize the range of a regular expression\n// The cases are complicated, see test cases for details\nconst sanitizeRange = (range) =>\n range.replace(REGEX_REGEXP_RANGE, (match, from, to) =>\n from.charCodeAt(0) <= to.charCodeAt(0)\n ? match\n : // Invalid range (out of order) which is ok for gitignore rules but\n // fatal for JavaScript regular expression, so eliminate it.\n EMPTY,\n )\n\n// See fixtures #59\nconst cleanRangeBackSlash = (slashes) => {\n const { length } = slashes\n return slashes.slice(0, length - (length % 2))\n}\n\n// > If the pattern ends with a slash,\n// > it is removed for the purpose of the following description,\n// > but it would only find a match with a directory.\n// > In other words, foo/ will match a directory foo and paths underneath it,\n// > but will not match a regular file or a symbolic link foo\n// > (this is consistent with the way how pathspec works in general in Git).\n// '`foo/`' will not match regular file '`foo`' or symbolic link '`foo`'\n// -> ignore-rules will not deal with it, because it costs extra `fs.stat` call\n// you could use option `mark: true` with `glob`\n\n// '`foo/`' should not continue with the '`..`'\nconst REPLACERS = [\n // > Trailing spaces are ignored unless they are quoted with backslash (\"\\\")\n [\n // (a\\ ) -> (a )\n // (a ) -> (a)\n // (a \\ ) -> (a )\n /\\\\?\\s+$/,\n (match) => (match.indexOf('\\\\') === 0 ? SPACE : EMPTY),\n ],\n\n // replace (\\ ) with ' '\n [/\\\\\\s/g, () => SPACE],\n\n // Escape metacharacters\n // which is written down by users but means special for regular expressions.\n\n // > There are 12 characters with special meanings:\n // > - the backslash \\,\n // > - the caret ^,\n // > - the dollar sign $,\n // > - the period or dot .,\n // > - the vertical bar or pipe symbol |,\n // > - the question mark ?,\n // > - the asterisk or star *,\n // > - the plus sign +,\n // > - the opening parenthesis (,\n // > - the closing parenthesis ),\n // > - and the opening square bracket [,\n // > - the opening curly brace {,\n // > These special characters are often called \"metacharacters\".\n [/[\\\\$.|*+(){^]/g, (match) => `\\\\${match}`],\n\n [\n // > a question mark (?) 
matches a single character\n /(?!\\\\)\\?/g,\n () => '[^/]',\n ],\n\n // leading slash\n [\n // > A leading slash matches the beginning of the pathname.\n // > For example, \"/*.c\" matches \"cat-file.c\" but not \"mozilla-sha1/sha1.c\".\n // A leading slash matches the beginning of the pathname\n /^\\//,\n () => '^',\n ],\n\n // replace special metacharacter slash after the leading slash\n [/\\//g, () => '\\\\/'],\n\n [\n // > A leading \"**\" followed by a slash means match in all directories.\n // > For example, \"**/foo\" matches file or directory \"foo\" anywhere,\n // > the same as pattern \"foo\".\n // > \"**/foo/bar\" matches file or directory \"bar\" anywhere that is directly\n // > under directory \"foo\".\n // Notice that the '*'s have been replaced as '\\\\*'\n /^\\^*\\\\\\*\\\\\\*\\\\\\//,\n\n // '**/foo' <-> 'foo'\n () => '^(?:.*\\\\/)?',\n ],\n\n // starting\n [\n // there will be no leading '/'\n // (which has been replaced by section \"leading slash\")\n // If starts with '**', adding a '^' to the regular expression also works\n /^(?=[^^])/,\n function startingReplacer() {\n // If has a slash `/` at the beginning or middle\n return !/\\/(?!$)/.test(this)\n ? // > Prior to 2.22.1\n // > If the pattern does not contain a slash /,\n // > Git treats it as a shell glob pattern\n // Actually, if there is only a trailing slash,\n // git also treats it as a shell glob pattern\n\n // After 2.22.1 (compatible but clearer)\n // > If there is a separator at the beginning or middle (or both)\n // > of the pattern, then the pattern is relative to the directory\n // > level of the particular .gitignore file itself.\n // > Otherwise the pattern may also match at any level below\n // > the .gitignore level.\n '(?:^|\\\\/)'\n : // > Otherwise, Git treats the pattern as a shell glob suitable for\n // > consumption by fnmatch(3)\n '^'\n },\n ],\n\n // two globstars\n [\n // Use lookahead assertions so that we could match more than one `'/**'`\n /\\\\\\/\\\\\\*\\\\\\*(?=\\\\\\/|$)/g,\n\n // Zero, one or several directories\n // should not use '*', or it will be replaced by the next replacer\n\n // Check if it is not the last `'/**'`\n (_, index, str) =>\n index + 6 < str.length\n ? 
// case: /**/\n // > A slash followed by two consecutive asterisks then a slash matches\n // > zero or more directories.\n // > For example, \"a/**/b\" matches \"a/b\", \"a/x/b\", \"a/x/y/b\" and so on.\n // '/**/'\n '(?:\\\\/[^\\\\/]+)*'\n : // case: /**\n // > A trailing `\"/**\"` matches everything inside.\n\n // #21: everything inside but it should not include the current folder\n '\\\\/.+',\n ],\n\n // normal intermediate wildcards\n [\n // Never replace escaped '*'\n // ignore rule '\\*' will match the path '*'\n\n // 'abc.*/' -> go\n // 'abc.*' -> skip this rule,\n // coz trailing single wildcard will be handed by [trailing wildcard]\n /(^|[^\\\\]+)(\\\\\\*)+(?=.+)/g,\n\n // '*.js' matches '.js'\n // '*.js' doesn't match 'abc'\n (_, p1, p2) => {\n // 1.\n // > An asterisk \"*\" matches anything except a slash.\n // 2.\n // > Other consecutive asterisks are considered regular asterisks\n // > and will match according to the previous rules.\n const unescaped = p2.replace(/\\\\\\*/g, '[^\\\\/]*')\n return p1 + unescaped\n },\n ],\n\n [\n // unescape, revert step 3 except for back slash\n // For example, if a user escape a '\\\\*',\n // after step 3, the result will be '\\\\\\\\\\\\*'\n /\\\\\\\\\\\\(?=[$.|*+(){^])/g,\n () => ESCAPE,\n ],\n\n [\n // '\\\\\\\\' -> '\\\\'\n /\\\\\\\\/g,\n () => ESCAPE,\n ],\n\n [\n // > The range notation, e.g. [a-zA-Z],\n // > can be used to match one of the characters in a range.\n\n // `\\` is escaped by step 3\n /(\\\\)?\\[([^\\]/]*?)(\\\\*)($|\\])/g,\n (match, leadEscape, range, endEscape, close) =>\n leadEscape === ESCAPE\n ? // '\\\\[bar]' -> '\\\\\\\\[bar\\\\]'\n `\\\\[${range}${cleanRangeBackSlash(endEscape)}${close}`\n : close === ']'\n ? endEscape.length % 2 === 0\n ? // A normal case, and it is a range notation\n // '[bar]'\n // '[bar\\\\\\\\]'\n `[${sanitizeRange(range)}${endEscape}]`\n : // Invalid range notaton\n // '[bar\\\\]' -> '[bar\\\\\\\\]'\n '[]'\n : '[]',\n ],\n\n // ending\n [\n // 'js' will not match 'js.'\n // 'ab' will not match 'abc'\n /(?:[^*])$/,\n\n // WTF!\n // https://git-scm.com/docs/gitignore\n // changes in [2.22.1](https://git-scm.com/docs/gitignore/2.22.1)\n // which re-fixes #24, #38\n\n // > If there is a separator at the end of the pattern then the pattern\n // > will only match directories, otherwise the pattern can match both\n // > files and directories.\n\n // 'js*' will not match 'a.js'\n // 'js/' will not match 'a.js'\n // 'js' will match 'a.js' and 'a.js/'\n (match) =>\n /\\/$/.test(match)\n ? // foo/ will not match 'foo'\n `${match}$`\n : // foo matches 'foo' and 'foo/'\n `${match}(?=$|\\\\/$)`,\n ],\n\n // trailing wildcard\n [\n /(\\^|\\\\\\/)?\\\\\\*$/,\n (_, p1) => {\n const prefix = p1\n ? // '\\^':\n // '/*' does not match EMPTY\n // '/*' does not match everything\n\n // '\\\\\\/':\n // 'abc/*' does not match 'abc/'\n `${p1}[^/]+`\n : // 'a*' matches 'a'\n // 'a*' matches 'aa'\n '[^/]*'\n\n return `${prefix}(?=$|\\\\/$)`\n },\n ],\n]\n\n// A simple cache, because an ignore rule only has only one certain meaning\nconst regexCache = Object.create(null)\n\n// @param {pattern}\nconst makeRegex = (pattern, ignoreCase) => {\n let source = regexCache[pattern]\n\n if (!source) {\n source = REPLACERS.reduce(\n (prev, current) => prev.replace(current[0], current[1].bind(pattern)),\n pattern,\n )\n regexCache[pattern] = source\n }\n\n return ignoreCase ? 
new RegExp(source, 'i') : new RegExp(source)\n}\n\nconst isString = (subject) => typeof subject === 'string'\n\n// > A blank line matches no files, so it can serve as a separator for readability.\nconst checkPattern = (pattern) =>\n pattern &&\n isString(pattern) &&\n !REGEX_TEST_BLANK_LINE.test(pattern) &&\n !REGEX_INVALID_TRAILING_BACKSLASH.test(pattern) &&\n // > A line starting with # serves as a comment.\n pattern.indexOf('#') !== 0\n\nconst splitPattern = (pattern) => pattern.split(REGEX_SPLITALL_CRLF)\n\nclass IgnoreRule {\n constructor(origin, pattern, negative, regex) {\n this.origin = origin\n this.pattern = pattern\n this.negative = negative\n this.regex = regex\n }\n}\n\nconst createRule = (pattern, ignoreCase) => {\n const origin = pattern\n let negative = false\n\n // > An optional prefix \"!\" which negates the pattern;\n if (pattern.indexOf('!') === 0) {\n negative = true\n pattern = pattern.substr(1)\n }\n\n pattern = pattern\n // > Put a backslash (\"\\\") in front of the first \"!\" for patterns that\n // > begin with a literal \"!\", for example, `\"\\!important!.txt\"`.\n .replace(REGEX_REPLACE_LEADING_EXCAPED_EXCLAMATION, '!')\n // > Put a backslash (\"\\\") in front of the first hash for patterns that\n // > begin with a hash.\n .replace(REGEX_REPLACE_LEADING_EXCAPED_HASH, '#')\n\n const regex = makeRegex(pattern, ignoreCase)\n\n return new IgnoreRule(origin, pattern, negative, regex)\n}\n\nconst throwError = (message, Ctor) => {\n throw new Ctor(message)\n}\n\nconst checkPath = (path, originalPath, doThrow) => {\n if (!isString(path)) {\n return doThrow(\n `path must be a string, but got \\`${originalPath}\\``,\n TypeError,\n )\n }\n\n // We don't know if we should ignore EMPTY, so throw\n if (!path) {\n return doThrow(`path must not be empty`, TypeError)\n }\n\n // Check if it is a relative path\n if (checkPath.isNotRelative(path)) {\n const r = '`path.relative()`d'\n return doThrow(\n `path should be a ${r} string, but got \"${originalPath}\"`,\n RangeError,\n )\n }\n\n return true\n}\n\nconst isNotRelative = (path) => REGEX_TEST_INVALID_PATH.test(path)\n\ncheckPath.isNotRelative = isNotRelative\ncheckPath.convert = (p) => p\n\nexport class Ignore {\n constructor({\n ignorecase = true,\n ignoreCase = ignorecase,\n allowRelativePaths = false,\n } = {}) {\n define(this, KEY_IGNORE, true)\n\n this._rules = []\n this._ignoreCase = ignoreCase\n this._allowRelativePaths = allowRelativePaths\n this._initCache()\n }\n\n _initCache() {\n this._ignoreCache = Object.create(null)\n this._testCache = Object.create(null)\n }\n\n _addPattern(pattern) {\n // #32\n if (pattern && pattern[KEY_IGNORE]) {\n this._rules = this._rules.concat(pattern._rules)\n this._added = true\n return\n }\n\n if (checkPattern(pattern)) {\n const rule = createRule(pattern, this._ignoreCase)\n this._added = true\n this._rules.push(rule)\n }\n }\n\n // @param {Array<string> | string | Ignore} pattern\n add(pattern) {\n this._added = false\n\n makeArray(isString(pattern) ? 
splitPattern(pattern) : pattern).forEach(\n this._addPattern,\n this,\n )\n\n // Some rules have just added to the ignore,\n // making the behavior changed.\n if (this._added) {\n this._initCache()\n }\n\n return this\n }\n\n // legacy\n addPattern(pattern) {\n return this.add(pattern)\n }\n\n // | ignored : unignored\n // negative | 0:0 | 0:1 | 1:0 | 1:1\n // -------- | ------- | ------- | ------- | --------\n // 0 | TEST | TEST | SKIP | X\n // 1 | TESTIF | SKIP | TEST | X\n\n // - SKIP: always skip\n // - TEST: always test\n // - TESTIF: only test if checkUnignored\n // - X: that never happen\n\n // @param {boolean} whether should check if the path is unignored,\n // setting `checkUnignored` to `false` could reduce additional\n // path matching.\n\n // @returns {TestResult} true if a file is ignored\n _testOne(path, checkUnignored) {\n let ignored = false\n let unignored = false\n\n this._rules.forEach((rule) => {\n const { negative } = rule\n if (\n (unignored === negative && ignored !== unignored) ||\n (negative && !ignored && !unignored && !checkUnignored)\n ) {\n return\n }\n\n const matched = rule.regex.test(path)\n\n if (matched) {\n ignored = !negative\n unignored = negative\n }\n })\n\n return {\n ignored,\n unignored,\n }\n }\n\n // @returns {TestResult}\n _test(originalPath, cache, checkUnignored, slices) {\n const path =\n originalPath &&\n // Supports nullable path\n checkPath.convert(originalPath)\n\n checkPath(\n path,\n originalPath,\n this._allowRelativePaths ? RETURN_FALSE : throwError,\n )\n\n return this._t(path, cache, checkUnignored, slices)\n }\n\n _t(path, cache, checkUnignored, slices) {\n if (path in cache) {\n return cache[path]\n }\n\n if (!slices) {\n // path/to/a.js\n // ['path', 'to', 'a.js']\n slices = path.split(SLASH)\n }\n\n slices.pop()\n\n // If the path has no parent directory, just test it\n if (!slices.length) {\n return (cache[path] = this._testOne(path, checkUnignored))\n }\n\n const parent = this._t(\n slices.join(SLASH) + SLASH,\n cache,\n checkUnignored,\n slices,\n )\n\n // If the path contains a parent directory, check the parent first\n return (cache[path] = parent.ignored\n ? 
// > It is not possible to re-include a file if a parent directory of\n // > that file is excluded.\n parent\n : this._testOne(path, checkUnignored))\n }\n\n ignores(path) {\n return this._test(path, this._ignoreCache, false).ignored\n }\n\n createFilter() {\n return (path) => !this.ignores(path)\n }\n\n filter(paths) {\n return makeArray(paths).filter(this.createFilter())\n }\n\n // @returns {TestResult}\n test(path) {\n return this._test(path, this._testCache, true)\n }\n}\n\nexport const ignore = (options) => new Ignore(options)\n\nconst isPathValid = (path) =>\n checkPath(path && checkPath.convert(path), path, RETURN_FALSE)\n\nignore.isPathValid = isPathValid\n" }, { "alpha_fraction": 0.6605504751205444, "alphanum_fraction": 0.7064220309257507, "avg_line_length": 35.33333206176758, "blob_id": "3de4f6301489a85611cc7db829a8ec689c070435", "content_id": "8fd2bcc02166163ea28d391cedb794dd1efbe65e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 109, "license_type": "permissive", "max_line_length": 63, "num_lines": 3, "path": "/bids-validator/src/tests/nullReadBytes.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export const nullReadBytes = (size: number, offset = 1024) => {\n return Promise.resolve(new Uint8Array())\n}\n" }, { "alpha_fraction": 0.6434862613677979, "alphanum_fraction": 0.6456880569458008, "avg_line_length": 30.99608612060547, "blob_id": "6a065a8905d6bcc318fddaf4187da9eaa38a9894", "content_id": "c3458d9d74699aaa511856d3fb0150c3dbac654d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 16350, "license_type": "permissive", "max_line_length": 86, "num_lines": 511, "path": "/bids-validator/utils/type.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Type\n *\n * A library of functions that take a file path and return a boolean\n * representing whether the given file path is valid within the\n * BIDS specification requirements.\n */\n\n/**\n * Import RegExps from bids-validator-common\n */\nimport associated_data_rules from '../bids_validator/rules/associated_data_rules.json'\n\nimport file_level_rules from '../bids_validator/rules/file_level_rules.json'\nimport phenotypic_rules from '../bids_validator/rules/phenotypic_rules.json'\nimport session_level_rules from '../bids_validator/rules/session_level_rules.json'\nimport subject_level_rules from '../bids_validator/rules/subject_level_rules.json'\nimport top_level_rules from '../bids_validator/rules/top_level_rules.json'\n\nlet bids_schema\n\n// Alternative method of loading from bids-specification schema\nexport function schemaSetup(schema) {\n bids_schema = schema\n}\n\n// Associated data\nconst associatedData = buildRegExp(associated_data_rules.associated_data)\n// File level\nconst anatNonparametric = buildRegExp(file_level_rules.anat_nonparametric)\nconst anatParametric = buildRegExp(file_level_rules.anat_parametric)\nconst anatDefacemask = buildRegExp(file_level_rules.anat_defacemask)\nconst anatMultiEcho = buildRegExp(file_level_rules.anat_multiecho)\nconst anatMultiFlip = buildRegExp(file_level_rules.anat_multiflip)\nconst anatMultiInv = buildRegExp(file_level_rules.anat_multiinv)\nconst anatMP2RAGE = buildRegExp(file_level_rules.anat_mp2rage)\nconst anatVFAMT = buildRegExp(file_level_rules.anat_vfa_mt)\nconst anatMTR = buildRegExp(file_level_rules.anat_mtr)\nconst behavioralData = 
buildRegExp(file_level_rules.behavioral)\nconst dwiData = buildRegExp(file_level_rules.dwi)\nconst eegData = buildRegExp(file_level_rules.eeg)\nconst fmapGre = buildRegExp(file_level_rules.fmap_gre)\nconst fmapPepolarAsl = buildRegExp(file_level_rules.fmap_pepolar_asl)\nconst fmapTB1DAM = buildRegExp(file_level_rules.fmap_TB1DAM)\nconst fmapTB1EPI = buildRegExp(file_level_rules.fmap_TB1EPI)\nconst fmapRF = buildRegExp(file_level_rules.fmap_rf)\nconst fmapTB1SRGE = buildRegExp(file_level_rules.fmap_TB1SRGE)\nconst fmapParametric = buildRegExp(file_level_rules.fmap_parametric)\nconst func = buildRegExp(file_level_rules.func)\nconst funcPhaseDeprecated = buildRegExp(file_level_rules.func_phase_deprecated)\nconst funcEvents = buildRegExp(file_level_rules.func_events)\nconst funcTimeseries = buildRegExp(file_level_rules.func_timeseries)\nconst funcBoldData = buildRegExp(file_level_rules.func_bold)\nconst aslData = buildRegExp(file_level_rules.asl)\nconst ieegData = buildRegExp(file_level_rules.ieeg)\nconst megData = buildRegExp(file_level_rules.meg)\nconst megCalibrationData = buildRegExp(file_level_rules.meg_calbibration)\nconst megCrosstalkData = buildRegExp(file_level_rules.meg_crosstalk)\nconst stimuliData = buildRegExp(file_level_rules.stimuli)\nconst petData = buildRegExp(file_level_rules.pet)\nconst petBlood = buildRegExp(file_level_rules.pet_blood)\nconst microscopyData = buildRegExp(file_level_rules.microscopy)\nconst microscopyPhotoData = buildRegExp(file_level_rules.microscopy_photo)\nconst microscopyJSON = buildRegExp(file_level_rules.microscopy_json)\nconst motion = buildRegExp(file_level_rules.motion)\nconst nirsData = buildRegExp(file_level_rules.nirs)\n// Phenotypic data\nconst phenotypicData = buildRegExp(phenotypic_rules.phenotypic_data)\n// Session level\nconst anatSes = buildRegExp(session_level_rules.anat_ses)\nconst dwiSes = buildRegExp(session_level_rules.dwi_ses)\nconst eegSes = buildRegExp(session_level_rules.eeg_ses)\nconst funcSes = buildRegExp(session_level_rules.func_ses)\nconst aslSes = buildRegExp(session_level_rules.asl_ses)\nconst ieegSes = buildRegExp(session_level_rules.ieeg_ses)\nconst megSes = buildRegExp(session_level_rules.meg_ses)\nconst scansSes = buildRegExp(session_level_rules.scans)\nconst petSes = buildRegExp(session_level_rules.pet_ses)\nconst motionSes = buildRegExp(session_level_rules.motion_ses)\nconst microscopySes = buildRegExp(session_level_rules.microscopy_ses)\nconst nirsSes = buildRegExp(session_level_rules.nirs_ses)\n// Subject level\nconst subjectLevel = buildRegExp(subject_level_rules.subject_level)\n// Top level\nconst rootTop = buildRegExp(top_level_rules.root_top)\nconst funcTop = buildRegExp(top_level_rules.func_top)\nconst aslTop = buildRegExp(top_level_rules.asl_top)\nconst anatTop = buildRegExp(top_level_rules.anat_top)\nconst vfaTop = buildRegExp(top_level_rules.VFA_top)\nconst megreTop = buildRegExp(top_level_rules.megre_mese_top)\nconst irt1Top = buildRegExp(top_level_rules.irt1_top)\nconst mpmTop = buildRegExp(top_level_rules.mpm_top)\nconst mtsTop = buildRegExp(top_level_rules.mts_top)\nconst mtrTop = buildRegExp(top_level_rules.mtr_top)\nconst mp2rageTop = buildRegExp(top_level_rules.mp2rage_top)\nconst dwiTop = buildRegExp(top_level_rules.dwi_top)\nconst eegTop = buildRegExp(top_level_rules.eeg_top)\nconst ieegTop = buildRegExp(top_level_rules.ieeg_top)\nconst multiDirFieldmap = buildRegExp(top_level_rules.multi_dir_fieldmap)\nconst otherTopFiles = buildRegExp(top_level_rules.other_top_files)\nconst megTop = 
buildRegExp(top_level_rules.meg_top)\nconst petTop = buildRegExp(top_level_rules.pet_top)\nconst motionTop = buildRegExp(top_level_rules.motion_top)\nconst microscopyTop = buildRegExp(top_level_rules.microscopy_top)\nconst nirsTop = buildRegExp(top_level_rules.nirs_top)\n\nexport default {\n /**\n * Is BIDS\n *\n * Check if a given path is valid within the\n * bids spec.\n */\n isBIDS: function (path) {\n return (\n this.file.isTopLevel(path) ||\n this.file.isStimuliData(path) ||\n this.file.isSessionLevel(path) ||\n this.file.isSubjectLevel(path) ||\n this.file.isAnat(path) ||\n this.file.isDWI(path) ||\n this.file.isFunc(path) ||\n this.file.isAsl(path) ||\n this.file.isMeg(path) ||\n this.file.isNIRS(path) ||\n this.file.isIEEG(path) ||\n this.file.isEEG(path) ||\n this.file.isBehavioral(path) ||\n this.file.isFieldMap(path) ||\n this.file.isPhenotypic(path) ||\n this.file.isPET(path) ||\n this.file.isPETBlood(path) ||\n this.file.isMOTION(path) ||\n this.file.isMicroscopy(path) ||\n this.file.isMicroscopyPhoto(path) ||\n this.file.isMicroscopyJSON(path)\n )\n },\n\n /**\n * Object with all file type checks\n */\n file: {\n /**\n * Check if the file has appropriate name for a top level file\n */\n isTopLevel: function (path) {\n if (bids_schema) {\n return (\n bids_schema.top_level_files.some((regex) => regex.exec(path)) ||\n funcTop.test(path) ||\n aslTop.test(path) ||\n dwiTop.test(path) ||\n anatTop.test(path) ||\n vfaTop.test(path) ||\n megreTop.test(path) ||\n irt1Top.test(path) ||\n mpmTop.test(path) ||\n mtsTop.test(path) ||\n mtrTop.test(path) ||\n mp2rageTop.test(path) ||\n multiDirFieldmap.test(path) ||\n otherTopFiles.test(path) ||\n megTop.test(path) ||\n eegTop.test(path) ||\n ieegTop.test(path) ||\n petTop.test(path) ||\n motionTop.test(path) ||\n nirsTop.test(path) ||\n microscopyTop.test(path)\n )\n } else {\n return (\n rootTop.test(path) ||\n funcTop.test(path) ||\n aslTop.test(path) ||\n dwiTop.test(path) ||\n anatTop.test(path) ||\n vfaTop.test(path) ||\n megreTop.test(path) ||\n irt1Top.test(path) ||\n mpmTop.test(path) ||\n mtsTop.test(path) ||\n mtrTop.test(path) ||\n mp2rageTop.test(path) ||\n multiDirFieldmap.test(path) ||\n otherTopFiles.test(path) ||\n megTop.test(path) ||\n eegTop.test(path) ||\n ieegTop.test(path) ||\n petTop.test(path) ||\n motionTop.test(path) ||\n nirsTop.test(path) ||\n microscopyTop.test(path)\n )\n }\n },\n\n /**\n * Check if file is a data file\n */\n isDatafile: function (path) {\n return (\n this.isAssociatedData(path) ||\n this.isTSV(path) ||\n this.isStimuliData(path) ||\n this.isPhenotypic(path) ||\n this.hasModality(path)\n )\n },\n /**\n * Check if file is appropriate associated data.\n */\n isAssociatedData: function (path) {\n return associatedData.test(path)\n },\n\n isTSV: function (path) {\n return path.endsWith('.tsv')\n },\n\n isContinousRecording: function (path) {\n return path.endsWith('.tsv.gz')\n },\n\n isStimuliData: function (path) {\n return stimuliData.test(path)\n },\n\n /**\n * Check if file is phenotypic data.\n */\n isPhenotypic: function (path) {\n return phenotypicData.test(path)\n },\n /**\n * Check if the file has appropriate name for a session level\n */\n isSessionLevel: function (path) {\n return (\n conditionalMatch(scansSes, path) ||\n conditionalMatch(funcSes, path) ||\n conditionalMatch(aslSes, path) ||\n conditionalMatch(anatSes, path) ||\n conditionalMatch(dwiSes, path) ||\n conditionalMatch(megSes, path) ||\n conditionalMatch(nirsSes, path) ||\n conditionalMatch(eegSes, path) ||\n 
conditionalMatch(ieegSes, path) ||\n conditionalMatch(petSes, path) ||\n conditionalMatch(motionSes, path) ||\n conditionalMatch(microscopySes, path)\n )\n },\n\n /**\n * Check if the file has appropriate name for a subject level\n */\n isSubjectLevel: function (path) {\n return subjectLevel.test(path)\n },\n\n /**\n * Check if the file has a name appropriate for an anatomical scan\n */\n isAnat: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['anat'].some((regex) => regex.exec(path))\n } else {\n return (\n conditionalMatch(anatNonparametric, path) ||\n conditionalMatch(anatParametric, path) ||\n conditionalMatch(anatDefacemask, path) ||\n conditionalMatch(anatMultiEcho, path) ||\n conditionalMatch(anatMultiFlip, path) ||\n conditionalMatch(anatMultiInv, path) ||\n conditionalMatch(anatMP2RAGE, path) ||\n conditionalMatch(anatVFAMT, path) ||\n conditionalMatch(anatMTR, path)\n )\n }\n },\n\n /**\n * Check if the file has a name appropriate for a diffusion scan\n */\n isDWI: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['dwi'].some((regex) => regex.exec(path))\n } else {\n return conditionalMatch(dwiData, path)\n }\n },\n\n /**\n * Check if the file has a name appropriate for a fieldmap scan\n */\n isFieldMap: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['fmap'].some((regex) => regex.exec(path))\n } else {\n return (\n conditionalMatch(fmapGre, path) ||\n conditionalMatch(fmapPepolarAsl, path) ||\n conditionalMatch(fmapTB1DAM, path) ||\n conditionalMatch(fmapTB1EPI, path) ||\n conditionalMatch(fmapTB1SRGE, path) ||\n conditionalMatch(fmapRF, path) ||\n conditionalMatch(fmapParametric, path)\n )\n }\n },\n\n isFieldMapMainNii: function (path) {\n return (\n !path.endsWith('.json') &&\n /* isFieldMap */\n (conditionalMatch(fmapGre, path) ||\n conditionalMatch(fmapPepolarAsl, path) ||\n conditionalMatch(fmapTB1DAM, path) ||\n conditionalMatch(fmapTB1EPI, path) ||\n conditionalMatch(fmapTB1SRGE, path) ||\n conditionalMatch(fmapRF, path) ||\n conditionalMatch(fmapParametric, path))\n )\n },\n\n /**\n * Check if the file has a name appropriate for a functional scan\n */\n isFunc: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['func'].some((regex) => regex.exec(path))\n } else {\n return (\n conditionalMatch(func, path) ||\n conditionalMatch(funcPhaseDeprecated, path) ||\n conditionalMatch(funcEvents, path) ||\n conditionalMatch(funcTimeseries, path)\n )\n }\n },\n\n isAsl: function (path) {\n return conditionalMatch(aslData, path)\n },\n\n isPET: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['pet'].some((regex) => regex.exec(path))\n } else {\n return conditionalMatch(petData, path)\n }\n },\n\n isPETBlood: function (path) {\n return conditionalMatch(petBlood, path)\n },\n\n isMeg: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['meg'].some((regex) => regex.exec(path))\n } else {\n return (\n conditionalMatch(megData, path) ||\n conditionalMatch(megCalibrationData, path) ||\n conditionalMatch(megCrosstalkData, path)\n )\n }\n },\n isNIRS: function (path) {\n return conditionalMatch(nirsData, path)\n },\n\n isEEG: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['eeg'].some((regex) => regex.exec(path))\n } else {\n return conditionalMatch(eegData, path)\n }\n },\n\n isIEEG: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['ieeg'].some((regex) => regex.exec(path))\n } else {\n return conditionalMatch(ieegData, path)\n 
}\n },\n\n isMOTION: function (path) {\n if (bids_schema) {\n // Motion not currently in schema\n // return bids_schema.datatypes['motion'].some(regex => regex.exec(path))\n return conditionalMatch(motion, path)\n } else {\n return conditionalMatch(motion, path)\n }\n },\n\n isMicroscopy: function (path) {\n return conditionalMatch(microscopyData, path)\n },\n\n isMicroscopyPhoto: function (path) {\n return conditionalMatch(microscopyPhotoData, path)\n },\n\n isMicroscopyJSON: function (path) {\n return conditionalMatch(microscopyJSON, path)\n },\n\n isBehavioral: function (path) {\n if (bids_schema) {\n return bids_schema.datatypes['beh'].some((regex) => regex.exec(path))\n } else {\n return conditionalMatch(behavioralData, path)\n }\n },\n\n isFuncBold: function (path) {\n return conditionalMatch(funcBoldData, path)\n },\n\n hasModality: function (path) {\n return (\n this.isAnat(path) ||\n this.isDWI(path) ||\n this.isFieldMap(path) ||\n this.isFieldMapMainNii(path) ||\n this.isFunc(path) ||\n this.isAsl(path) ||\n this.isMeg(path) ||\n this.isNIRS(path) ||\n this.isEEG(path) ||\n this.isIEEG(path) ||\n this.isBehavioral(path) ||\n this.isFuncBold(path) ||\n this.isPET(path) ||\n this.isPETBlood(path) ||\n this.isMicroscopy(path) ||\n this.isMicroscopyPhoto(path) ||\n this.isMicroscopyJSON(path) ||\n this.isMOTION(path)\n )\n },\n },\n\n checkType(obj, typeString) {\n if (typeString == 'number') {\n return !isNaN(parseFloat(obj)) && isFinite(obj)\n } else {\n return typeof obj == typeString\n }\n },\n\n /**\n * Get Path Values\n *\n * Takes a file path and returns and values\n * found for the following path keys.\n * sub-\n * ses-\n */\n getPathValues: function (path) {\n var values = {},\n match\n\n // capture subject\n match = /^\\/sub-([a-zA-Z0-9]+)/.exec(path)\n values.sub = match && match[1] ? match[1] : null\n\n // capture session\n match = /^\\/sub-[a-zA-Z0-9]+\\/ses-([a-zA-Z0-9]+)/.exec(path)\n values.ses = match && match[1] ? 
match[1] : null\n\n return values\n },\n\n // CommonJS default export\n schemaSetup,\n}\n\nfunction conditionalMatch(expression, path) {\n const match = expression.exec(path)\n\n // we need to do this because JS does not support conditional groups\n if (match) {\n if ((match[2] && match[3]) || !match[2]) {\n return true\n }\n }\n return false\n}\n\n/**\n * Insert tokens into RegExps from bids-validator-common\n */\nfunction buildRegExp(obj) {\n if (obj.tokens) {\n let regExp = obj.regexp\n const keys = Object.keys(obj.tokens)\n for (let key of keys) {\n const args = obj.tokens[key].join('|')\n regExp = regExp.replace(key, args)\n }\n return new RegExp(regExp)\n } else {\n return new RegExp(obj.regexp)\n }\n}\n" }, { "alpha_fraction": 0.7319148778915405, "alphanum_fraction": 0.7319148778915405, "avg_line_length": 32.57143020629883, "blob_id": "72b45f8df02745f968668475fdac7e49efd28340", "content_id": "66445113b4f052058003b204db71957c6cffc5fd", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 235, "license_type": "permissive", "max_line_length": 78, "num_lines": 7, "path": "/bids-validator/src/utils/errors.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export class SchemaStructureError extends Error {\n constructor(schemaPath) {\n super(`Validator attempted to access ${schemaPath}, but it wasn't there.`)\n this.name = 'SchemaStructureError'\n this.schemaPath = schemaPath\n }\n}\n" }, { "alpha_fraction": 0.658450722694397, "alphanum_fraction": 0.658450722694397, "avg_line_length": 36.86666488647461, "blob_id": "a421dc90f5d0e7e7e873293c75385ea9d2ce4d13", "content_id": "bfcd81799c17589a60fbb9805e6e84d99ca7cc6d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 568, "license_type": "permissive", "max_line_length": 77, "num_lines": 15, "path": "/bids-validator/src/validators/isBidsy.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Not sure if we want this yet. 
Would be to create issues for non standard\n * derivatives to have the lowest common denomenator of bids like file names.\n */\n// @ts-nocheck\nimport { SEP } from '../deps/path.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport { CheckFunction } from '../../types/check.ts'\nimport { BIDSFile } from '../types/file.ts'\nimport { Schema } from '../types/schema.ts'\n\nexport const isBidsyFilename: CheckFunction = (schema, context) => {\n // every '.', '-', '_' followed by an alnum\n // only contains '.', '-', '_' and alnum\n}\n" }, { "alpha_fraction": 0.621004581451416, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 18.909090042114258, "blob_id": "34604b7b92c123f3d24238518db25a659fdfe64f", "content_id": "1d810c5f64c7c3ceb6776a3fc88d8cb60fc1ac87", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 219, "license_type": "permissive", "max_line_length": 69, "num_lines": 11, "path": "/bids-validator/src/deps/path.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export {\n relative,\n resolve,\n join,\n basename,\n dirname,\n extname,\n fromFileUrl,\n parse,\n} from 'https://deno.land/[email protected]/path/mod.ts'\nexport { SEP } from 'https://deno.land/[email protected]/path/separator.ts'\n" }, { "alpha_fraction": 0.5255767107009888, "alphanum_fraction": 0.5406218767166138, "avg_line_length": 22.186046600341797, "blob_id": "4285f6a5f0b84be82e8676d0a5994977541c4e1b", "content_id": "0f9fe63df4f35ecb9b7c7e6eff487602dd896a32", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 997, "license_type": "permissive", "max_line_length": 90, "num_lines": 43, "path": "/bids-validator/validators/tsv/__tests__/checkMotionComponent.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport checkMotionComponent from '../checkMotionComponent'\n\ndescribe('checkMotionComponent', () => {\n it('returns no issue if components is in correct location and values are valid', () => {\n const issues = []\n checkMotionComponent(\n [\n ['test', 'component'],\n [0, 'x'],\n ],\n 'testfile.tsv',\n issues,\n )\n assert.lengthOf(issues, 0)\n })\n it('returns issue if components are in wrong column', () => {\n const issues = []\n checkMotionComponent(\n [\n ['component', 'test'],\n ['x', 0],\n ],\n 'testfile.tsv',\n issues,\n )\n assert.lengthOf(issues, 1)\n assert(issues[0].code === 235)\n })\n it('returns issue if components are incorrect value', () => {\n const issues = []\n checkMotionComponent(\n [\n ['test', 'component'],\n [0, 0],\n ],\n 'testfile.tsv',\n issues,\n )\n assert.lengthOf(issues, 1)\n assert(issues[0].code === 236)\n })\n})\n" }, { "alpha_fraction": 0.5927083492279053, "alphanum_fraction": 0.5989583134651184, "avg_line_length": 23.615385055541992, "blob_id": "f5bd3ea475bb088749fa30d434ca0512355be457", "content_id": "82d6d36739b7fb1e278b12736bc825504df79254", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 960, "license_type": "permissive", "max_line_length": 71, "num_lines": 39, "path": "/bids-validator/src/types/filetree.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Abstract FileTree for all environments (Deno, Browser, Python)\n */\nimport { BIDSFile } from '../types/file.ts'\n\nexport class FileTree {\n // Relative path to this FileTree 
location\n path: string\n // Name of this directory level\n name: string\n files: BIDSFile[]\n directories: FileTree[]\n parent?: FileTree\n\n constructor(path: string, name: string, parent?: FileTree) {\n this.path = path\n this.files = []\n this.directories = []\n this.name = name\n this.parent = parent\n }\n\n contains(parts: string[]): boolean {\n if (parts.length === 0) {\n return false\n } else if (parts.length === 1) {\n return this.files.some((x) => x.name === parts[0])\n } else if (parts.length > 1) {\n const nextDir = this.directories.find((x) => x.name === parts[0])\n if (nextDir) {\n return nextDir.contains(parts.slice(1, parts.length))\n } else {\n return false\n }\n } else {\n return false\n }\n }\n}\n" }, { "alpha_fraction": 0.5224192142486572, "alphanum_fraction": 0.5349321961402893, "avg_line_length": 19.84782600402832, "blob_id": "90a907799e8cd81b55a53e02f9922d28aaf5b35f", "content_id": "07a967ae5c35e5148b02c08ad1a9daacc8b2b7b4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 959, "license_type": "permissive", "max_line_length": 67, "num_lines": 46, "path": "/bids-validator/validators/tsv/checkMotionComponent.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nconst componentEnum = [\n 'x',\n 'y',\n 'z',\n 'quat_x',\n 'quat_y',\n 'quat_z',\n 'quat_w',\n 'n/a',\n]\n\nexport const checkMotionComponent = function (rows, file, issues) {\n const header = rows[0]\n const componentIndex = header.indexOf('component')\n if (componentIndex != 1) {\n issues.push(\n new Issue({\n file: file,\n evidence: header.join(','),\n line: 0,\n reason: `Component found on column ${componentIndex + 1}.`,\n code: 235,\n }),\n )\n }\n\n for (let a = 1; a < rows.length; a++) {\n const line = rows[a]\n const component = line[componentIndex]\n if (!componentEnum.includes(component)) {\n issues.push(\n new Issue({\n file: file,\n evidence: line.join(','),\n line: a + 1,\n reason: `Found value ${component}`,\n code: 236,\n }),\n )\n }\n }\n}\n\nexport default checkMotionComponent\n" }, { "alpha_fraction": 0.5823673009872437, "alphanum_fraction": 0.5909090638160706, "avg_line_length": 31.13725471496582, "blob_id": "ca1eca5b60cd5a80e3f4d416b91601ef68a22b63", "content_id": "f5779f5ca97e1fb1231e4958f5ec36a7ea27ff29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3278, "license_type": "permissive", "max_line_length": 104, "num_lines": 102, "path": "/bids-validator/tests/utils/files-web.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * eslint no-console: [\"error\", { allow: [\"log\"] }]\n * @jest-environment jsdom\n */\n\nimport assert from 'assert'\n\nimport utils from '../../../bids-validator/utils'\nimport groupFileTypes from '../../../bids-validator/validators/bids/groupFileTypes'\nimport checkReadme from '../../../bids-validator/validators/bids/checkReadme.js'\nimport checkDatasetDescription from '../../../bids-validator/validators/bids/checkDatasetDescription.js'\nimport validateMisc from '../../../bids-validator/utils/files/validateMisc'\nimport { createFileList } from '../env/FileList'\n\ndescribe('files in browser', () => {\n describe('files utils in nodejs', () => {\n describe('FileAPI', () => {\n it('should not return a mock implementation', () => {\n let File = utils.files.FileAPI()\n assert(File.name !== 'NodeFile')\n })\n 
})\n })\n\n describe('files utils in browsers', () => {\n describe('newFile', () => {\n it('creates a new File API object', () => {\n const test_file = utils.files.newFile('test-file')\n assert.equal(test_file.name, 'test-file')\n assert(File.prototype.isPrototypeOf(test_file))\n })\n })\n })\n\n describe('dataset_description.json', () => {\n it('throws warning if it does not exist in proper location', () => {\n const fileList = {}\n const issues = checkDatasetDescription(fileList)\n assert(issues[0].key === 'DATASET_DESCRIPTION_JSON_MISSING')\n })\n })\n\n describe('README', () => {\n it('throws warning if it does not exist in proper location', () => {\n const fileList = {\n 1: {\n name: 'README',\n path: 'tests/data/bids-examples/ds001/not-root-dir/README',\n relativePath: '/not-root-dir/README',\n },\n }\n const issues = checkReadme(fileList)\n assert(issues[0].key === 'README_FILE_MISSING')\n })\n\n it('throws warning if it is too small', () => {\n const fileList = {\n 1: {\n name: 'README',\n path: 'tests/data/bids-examples/ds001/README',\n relativePath: '/README',\n size: 20,\n },\n }\n const issues = checkReadme(fileList)\n assert(issues[0].key === 'README_FILE_SMALL')\n })\n })\n\n describe('validateMisc', () => {\n let filelist = [],\n dir\n\n beforeAll(() => {\n // contains stripped down CTF format dataset: Both, BadChannels and\n // bad.segments files can be empty and still valid. Everything else must\n // not be empty.\n dir = `${process.cwd()}/bids-validator/tests/data/empty_files`\n })\n\n // generate an array of browser Files\n beforeEach(() => {\n filelist = createFileList(dir)\n })\n\n it('returns issues for empty files (0kb), accepting a limited set of exceptions', (done) => {\n const files = groupFileTypes(filelist, {})\n\n validateMisc(files.misc).then((issues) => {\n // *.meg4 and BadChannels files are empty. 
But only *.meg4 is an issue\n assert.ok(issues.length == 1)\n assert.ok(issues.every((issue) => issue instanceof utils.issues.Issue))\n assert.notStrictEqual(\n issues.findIndex((issue) => issue.code === 99),\n -1,\n )\n assert.ok(issues[0].file.name == 'sub-0001_task-AEF_run-01_meg.meg4')\n done()\n })\n })\n })\n})\n" }, { "alpha_fraction": 0.6898179650306702, "alphanum_fraction": 0.6972353458404541, "avg_line_length": 35.17073059082031, "blob_id": "8b952c7cf87600996505913dda90d782b4807348", "content_id": "f26a5419a3e5abd5173468e27e4620a18d29d567", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1483, "license_type": "permissive", "max_line_length": 80, "num_lines": 41, "path": "/bids-validator/src/schema/entities.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from '../deps/asserts.ts'\nimport { readEntities } from './entities.ts'\nimport { nullReadBytes } from '../tests/nullReadBytes.ts'\nimport { generateBIDSFilename } from '../tests/generate-filenames.ts'\n\nDeno.test('test readEntities', (t) => {\n const testFile = {\n name: 'task-rhymejudgment_bold.json',\n path: '/task-rhymejudgment_bold.json',\n size: null as unknown as number,\n ignored: false,\n stream: null as unknown as ReadableStream<Uint8Array>,\n text: () => Promise.resolve(''),\n readBytes: nullReadBytes,\n }\n const context = readEntities(testFile.name)\n assert(context.suffix === 'bold', 'failed to match suffix')\n assert(context.extension === '.json', 'failed to match extension')\n assert(context.entities.task === 'rhymejudgment', 'failed to match extension')\n})\n\nDeno.test('test readEntities performance', (t) => {\n const generateStart = performance.now()\n const testFilenames = []\n for (let n = 0; n < 200000; n++) {\n testFilenames.push(generateBIDSFilename(Math.floor(Math.random() * 4)))\n }\n const generateEnd = performance.now()\n const normalizePerf = generateEnd - generateStart\n\n const start = performance.now()\n for (const each of testFilenames) {\n readEntities(each)\n }\n const end = performance.now()\n const readEntitiesTime = end - start\n\n const perfRatio = readEntitiesTime / normalizePerf + Number.EPSILON\n console.log(`readEntities() runtime ratio: ${perfRatio.toFixed(2)}`)\n assert(perfRatio < 2)\n})\n" }, { "alpha_fraction": 0.5893909335136414, "alphanum_fraction": 0.5933202505111694, "avg_line_length": 30.163265228271484, "blob_id": "170082483cb075258bd5085dd6d33a7d636fcb54", "content_id": "cc9b50e0c23103e52de814b066e545de365e894d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1527, "license_type": "permissive", "max_line_length": 75, "num_lines": 49, "path": "/bids-validator/src/tests/schema-expression-language.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { loadSchema } from '../setup/loadSchema.ts'\nimport { Table } from '../deps/cliffy.ts'\nimport { colors } from '../deps/fmt.ts'\nimport { BIDSContext } from '../schema/context.ts'\nimport { assert, assertEquals } from '../deps/asserts.ts'\nimport { evalCheck } from '../schema/applyRules.ts'\n\nconst schema = await loadSchema()\nconst pretty_null = (x: string | null): string => (x === null ? 
'null' : x)\n\nDeno.test('validate schema expression tests', async (t) => {\n const results: string[][] = []\n const header = ['expression', 'desired', 'actual', 'result'].map((x) =>\n colors.magenta(x),\n )\n for (const test of schema.meta.expression_tests) {\n await t.step(`${test.expression} evals to ${test.result}`, () => {\n const actual_result = evalCheck(test.expression, {} as BIDSContext)\n if (actual_result == test.result) {\n results.push([\n colors.cyan(test.expression),\n pretty_null(test.result),\n pretty_null(actual_result),\n colors.green('pass'),\n ])\n } else {\n results.push([\n colors.cyan(test.expression),\n pretty_null(test.result),\n pretty_null(actual_result),\n colors.red('fail'),\n ])\n }\n assertEquals(actual_result, test.result)\n })\n }\n results.sort((a, b) => {\n return a[3].localeCompare(b[3])\n })\n const table = new Table()\n .header(header)\n .border(false)\n .body(results)\n .padding(1)\n .indent(2)\n .maxColWidth(40)\n .toString()\n console.log(table)\n})\n" }, { "alpha_fraction": 0.6566163897514343, "alphanum_fraction": 0.6633166074752808, "avg_line_length": 38.79999923706055, "blob_id": "7af875a4c856da81c2c9c074a4565a15bc782684", "content_id": "9e0fbe886f54b05bb41da7f353c630f1c6cc74ef", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 597, "license_type": "permissive", "max_line_length": 79, "num_lines": 15, "path": "/bids-validator/src/summary/summary.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { computeModalities, modalityPrettyLookup, Summary } from './summary.ts'\nimport { assertEquals, assertObjectMatch } from '../deps/asserts.ts'\n\nDeno.test('Summary class and helper functions', async (t) => {\n await t.step('Constructor succeeds', () => {\n new Summary()\n })\n await t.step('computeModalities properly sorts modality counts', () => {\n const modalitiesIn = { eeg: 5, pet: 6, mri: 6, ieeg: 6 }\n const modalitiesOut = ['pet', 'ieeg', 'mri', 'eeg'].map(\n (x) => modalityPrettyLookup[x],\n )\n assertEquals(computeModalities(modalitiesIn), modalitiesOut)\n })\n})\n" }, { "alpha_fraction": 0.5651504993438721, "alphanum_fraction": 0.6400647163391113, "avg_line_length": 38.669795989990234, "blob_id": "ee405ad8e0c07f632d43123b661c27a7425b6de1", "content_id": "3770df0de95a118382e48c053e4158d253ba4175", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 25349, "license_type": "permissive", "max_line_length": 145, "num_lines": 639, "path": "/bids-validator/tests/type.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport utils from '../utils'\nimport BIDS from '../validators/bids'\n\ndescribe('utils.type.file.isAnat', function () {\n const goodFilenames = [\n '/sub-15/anat/sub-15_inplaneT2.nii.gz',\n '/sub-15/ses-12/anat/sub-15_ses-12_inplaneT2.nii.gz',\n '/sub-16/anat/sub-16_T1w.nii.gz',\n '/sub-16/anat/sub-16_T1w.json',\n '/sub-16/anat/sub-16_run-01_T1w.nii.gz',\n '/sub-16/anat/sub-16_acq-highres_T1w.nii.gz',\n '/sub-16/anat/sub-16_rec-mc_T1w.nii.gz',\n '/sub-16/anat/sub-16_ce-contrastagent_T1w.nii.gz',\n '/sub-16/anat/sub-16_part-mag_T1w.nii.gz',\n '/sub-16/anat/sub-16_T1map.nii.gz',\n '/sub-16/anat/sub-16_mod-T1w_defacemask.nii.gz',\n '/sub-16/anat/sub-16_echo-1_MESE.nii.gz',\n '/sub-16/anat/sub-16_flip-1_VFA.nii.gz',\n '/sub-16/anat/sub-16_inv-1_IRT1.nii.gz',\n 
'/sub-16/anat/sub-16_flip-1_inv-1_MP2RAGE.nii.gz',\n '/sub-16/anat/sub-16_flip-1_mt-on_MPM.nii.gz',\n '/sub-16/anat/sub-16_mt-on_part-real_MTR.nii.gz',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isAnat('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isAnat(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-1/anat/sub-15_inplaneT2.nii.gz',\n '/sub-15/ses-12/anat/sub-15_inplaneT2.nii.gz',\n '/sub-16/anat/sub-16_T1.nii.gz',\n 'blaaa.nii.gz',\n '/sub-16/anat/sub-16_run-second_T1w.nii.gz',\n '/sub-16/anat/sub-16_run-01_rec-mc_T1w.nii.gz',\n '/sub-16/anat/sub-16_part-magnitude_T1w.nii.gz',\n '/sub-16/anat/sub-16_part-mag_T1map.nii.gz',\n '/sub-16/anat/sub-16_mod-T1weighted_defacemask.nii.gz',\n '/sub-16/anat/sub-16_MESE.nii.gz',\n '/sub-16/anat/sub-16_VFA.nii.gz',\n '/sub-16/anat/sub-16_IRT1.nii.gz',\n '/sub-16/anat/sub-16_flip-1_MP2RAGE.nii.gz',\n '/sub-16/anat/sub-16_flip-1_mt-fail_MPM.nii.gz',\n '/sub-16/anat/sub-16_flip-1_mt-fail_part-real_MTR.nii.gz',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isAnat('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isAnat(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isFunc', function () {\n var goodFilenames = [\n '/sub-15/func/sub-15_task-0back_bold.nii.gz',\n '/sub-15/ses-12/func/sub-15_ses-12_task-0back_bold.nii.gz',\n '/sub-16/func/sub-16_task-0back_bold.json',\n '/sub-16/func/sub-16_task-0back_run-01_bold.nii.gz',\n '/sub-16/func/sub-16_task-0back_acq-highres_bold.nii.gz',\n '/sub-16/func/sub-16_task-0back_rec-mc_bold.nii.gz',\n '/sub-16/func/sub-16_task-0back_run-01_phase.nii.gz',\n '/sub-16/func/sub-16_task-0back_echo-1_phase.nii.gz',\n '/sub-15/func/sub-15_task-0back_part-phase_bold.nii.gz',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isFunc('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isFunc(path), true)\n isdone()\n })\n })\n\n var badFilenames = [\n '/sub-1/func/sub-15_inplaneT2.nii.gz',\n '/sub-15/ses-12/func/sub-15_inplaneT2.nii.gz',\n '/sub-16/func/sub-16_T1.nii.gz',\n 'blaaa.nii.gz',\n '/sub-16/func/sub-16_run-second_T1w.nii.gz',\n '/sub-16/func/sub-16_task-0-back_rec-mc_bold.nii.gz',\n '/sub-16/func/sub-16_run-01_rec-mc_T1w.nii.gz',\n '/sub-16/func/sub-16_task-0back_part-magnitude_bold.nii.gz',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isFunc('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isFunc(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isTopLevel', function () {\n const goodFilenames = [\n '/README',\n '/CHANGES',\n '/LICENSE',\n '/dataset_description.json',\n '/ses-pre_task-rest_bold.json',\n '/dwi.bval',\n '/dwi.bvec',\n '/T1w.json',\n '/acq-test_dwi.json',\n '/rec-test_physio.json',\n '/task-testing_eeg.json',\n '/task-testing_ieeg.json',\n '/task-testing_meg.json',\n '/events.json',\n '/scans.json',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isTopLevel('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isTopLevel(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/readme.txt',\n '/changelog',\n '/license.txt',\n '/dataset_description.yml',\n '/ses.json',\n '/_T1w.json',\n '/_dwi.json',\n '/_task-test_physio.json',\n // cross-talk and fine-calibration files for Neuromag/Elekta/MEGIN data (.fif)\n // must be defined at file level.\n '/acq-calibration_meg.dat',\n '/acq-crosstalk_meg.fif',\n ]\n\n badFilenames.forEach(function (path) {\n 
it(\"isTopLevel('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isTopLevel(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isSubjectLevel', () => {\n const goodFilenames = [] // to be extended in the future...\n\n goodFilenames.forEach((path) => {\n it(\"isSubjectLevel('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isSubjectLevel(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n // cross-talk and fine-calibration files for Neuromag/Elekta/MEGIN data (.fif)\n // must be placed on file level.\n '/sub-12/sub-12_acq-calibration_meg.dat',\n '/sub-12/sub-12_acq-crosstalk_meg.fif',\n '/sub-12/acq-calibration_meg.dat',\n '/sub-12/acq-crosstalk_meg.fif',\n '/sub-12/acq-calibration.dat',\n '/sub-12/acq-crosstalk.fif',\n ]\n\n badFilenames.forEach((path) => {\n it(\"isSubjectLevel('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isSubjectLevel(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isSessionLevel', function () {\n const goodFilenames = [\n '/sub-12/sub-12_scans.tsv',\n '/sub-12/sub-12_scans.json',\n '/sub-12/ses-pre/sub-12_ses-pre_scans.tsv',\n '/sub-12/ses-pre/sub-12_ses-pre_scans.json',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isSessionLevel('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isSessionLevel(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-12/sub-12.tsv',\n '/sub-12/ses-pre/sub-12_ses-pre_scan.tsv',\n // cross-talk and fine-calibration files for Neuromag/Elekta/MEGIN data (.fif)\n // must be placed at file level.\n '/sub-12/sub-12_acq-calibration_meg.dat',\n '/sub-12/sub-12_acq-crosstalk_meg.fif',\n '/sub-12/ses-pre/sub-12_ses-pre_acq-calibration_meg.dat',\n '/sub-12/ses-pre/sub-12_ses-pre_acq-crosstalk_meg.fif',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isSessionLevel('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isSessionLevel(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isDWI', function () {\n const goodFilenames = [\n '/sub-12/dwi/sub-12_dwi.nii.gz',\n '/sub-12/dwi/sub-12_dwi.json',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.nii.gz',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.bvec',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.bval',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.json',\n '/sub-12/dwi/sub-12_sbref.nii.gz',\n '/sub-12/dwi/sub-12_sbref.json',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_sbref.nii.gz',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_sbref.json',\n '/sub-12/dwi/sub-12_part-mag_sbref.json',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isDWI('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isDWI(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-12/sub-12.tsv',\n '/sub-12/ses-pre/sub-12_ses-pre_scan.tsv',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.bvecs',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_dwi.bvals',\n '/sub-12/dwi/sub-12_sbref.bval',\n '/sub-12/dwi/sub-12_sbref.bvec',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_sbref.bval',\n '/sub-12/ses-pre/dwi/sub-12_ses-pre_sbref.bvec',\n '/sub-12/dwi/sub-12_part-magnitude_sbref.json',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isDWI('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isDWI(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isMEG', function () {\n const goodFilenames = [\n // Metadata MEG files\n 
'/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.json',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_channels.tsv',\n // Father directory files are fine for some file formats:\n // Father dir: CTF data with a .ds ... the contents within .ds are not checked\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.ds/catch-alp-good-f.meg4',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.ds/xyz',\n // Father dir: BTi/4D ... again: within contents not checked\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/config',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/hs_file',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/e,rfhp1.0Hz.COH',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/c,rfDC',\n // NO father dir: KRISS data\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_run-01_meg.chn',\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_run-01_meg.kdf',\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_run-01_meg.trg',\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_digitizer.txt',\n // NO father dir: KIT data\n '/sub-01/ses-001/meg/sub-01_ses-001_markers.sqd',\n '/sub-01/ses-001/meg/sub-01_ses-001_markers.mrk',\n '/sub-01/ses-001/meg/sub-01_ses-001_meg.sqd',\n '/sub-01/ses-001/meg/sub-01_ses-001_meg.con',\n // NO father dir: ITAB data\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_run-01_meg.raw',\n '/sub-control01/ses-001/meg/sub-control01_ses-001_task-rest_run-01_meg.raw.mhd',\n // NO father dir: fif data\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_split-01_meg.fif',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_acq-TEST_run-01_split-01_meg.fif',\n // cross-talk and fine-calibration files for Neuromag/Elekta/MEGIN data (.fif)\n '/sub-01/meg/sub-01_acq-calibration_meg.dat',\n '/sub-01/meg/sub-01_acq-crosstalk_meg.fif',\n '/sub-01/ses-001/meg/sub-01_ses-001_acq-calibration_meg.dat',\n '/sub-01/ses-001/meg/sub-01_ses-001_acq-crosstalk_meg.fif',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isMeg('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isMeg(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n // missing session directory\n '/sub-01/meg/sub-01_ses-001_task-rest_run-01_meg.json',\n // subject not matching\n '/sub-01/ses-001/meg/sub-12_ses-001_task-rest_run-01_split-01_meg.fif',\n // invalid file endings\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.tsv',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.bogus',\n // wrong order of entities: https://github.com/bids-standard/bids-validator/issues/767\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_acq-TEST_split-01_meg.fif',\n // only parent directory name matters for BTi and CTF systems\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meggg/config',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg.dd/xyz',\n // KIT with a father dir ... should not have a father dir\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_markers.sqd',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_markers.con',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.sqd',\n // FIF with a father dir ... should not have a father dir\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_meg.fif',\n // ITAB with a father dir ... 
should not have a father dir\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.raw',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.raw.mhd',\n // KRISS with a father dir ... should not have a father dir\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.kdf',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.trg',\n '/sub-01/ses-001/meg/sub-01_ses-001_task-rest_run-01_meg/sub-01_ses-001_task-rest_run-01_meg.chn',\n // cross-talk and fine-calibration files for Neuromag/Elekta/MEGIN data (.fif)\n // .dat in MEG only allowed for \"acq-calibration\"\n '/acq-notcalibration_meg.dat',\n '/sub-01/ses-001/meg/sub-01_ses-001_acq-notcalibration_meg.dat',\n '/sub-01/ses-001/meg/sub-01_ses-001_acq-crosstalk_meg.dat',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isMeg('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isMeg(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isEEG', function () {\n const goodFilenames = [\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.json',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_events.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_split-01_eeg.edf',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.eeg',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.vmrk',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.vhdr',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.bdf',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.set',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.fdt',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_channels.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_electrodes.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_space-CapTrak_electrodes.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_coordsystem.json',\n '/sub-01/ses-001/eeg/sub-01_ses-001_space-CapTrak_coordsystem.json',\n '/sub-01/ses-001/eeg/sub-01_ses-001_photo.jpg',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isEEG('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isEEG(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-01/eeg/sub-01_ses-001_task-rest_run-01_eeg.json',\n '/sub-01/ses-001/eeg/sub-12_ses-001_task-rest_run-01_split-01_eeg.edf',\n '/sub-01/ses-001/eeg/sub-01_ses-001_task-rest_run-01_eeg.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_space-BOGUS_electrodes.tsv',\n '/sub-01/ses-001/eeg/sub-01_ses-001_space-BOGUS_coordsystem.json',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isEEG('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isEEG(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isIEEG', function () {\n const goodFilenames = [\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_ieeg.json',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.edf',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.vhdr',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.vmrk',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.eeg',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.set',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.fdt',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.nwb',\n 
'/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.mefd/sub-01_ses-001_task-rest_run-01_ieeg.rdat',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.mefd/sub-01_ses-001_task-rest_run-01_ieeg.ridx',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.mefd/CH1.timd/CH1-000000.segd/sub-01_ses-001_task-rest_run-01_ieeg.tdat',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.mefd/CH1.timd/CH1-000000.segd/sub-01_ses-001_task-rest_run-01_ieeg.idx',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_split-01_ieeg.mefd/CH1.timd/CH1-000000.segd/sub-01_ses-001_task-rest_run-01_ieeg.tmet',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_channels.tsv',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_space-fsaverage_electrodes.tsv',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_space-fsaverage_coordsystem.json',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isIEEG('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isIEEG(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-01/ieeg/sub-01_ses-001_task-rest_run-01_ieeg.json',\n '/sub-01/ses-001/ieeg/sub-12_ses-001_task-rest_run-01_split-01_ieeg.fif',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_task-rest_run-01_ieeg.tsv',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_space-fsavg_electrodes.tsv',\n '/sub-01/ses-001/ieeg/sub-01_ses-001_space-fsavg_coordsystem.json',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isIEEG('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isIEEG(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isPhenotypic', function () {\n it('should allow .tsv and .json files in the /phenotype directory', function () {\n assert(utils.type.file.isPhenotypic('/phenotype/acds_adult.json'))\n assert(utils.type.file.isPhenotypic('/phenotype/acds_adult.tsv'))\n })\n\n it('should not allow non .tsv and .json files in the /phenotype directory', function () {\n assert(!utils.type.file.isPhenotypic('/phenotype/acds_adult.jpeg'))\n assert(!utils.type.file.isPhenotypic('/phenotype/acds_adult.gif'))\n })\n})\n\ndescribe('utils.type.file.isAssociatedData', function () {\n it('should return false for unknown root directories', function () {\n var badFilenames = ['/images/picture.jpeg', '/temporary/test.json']\n\n badFilenames.forEach(function (path) {\n assert.equal(utils.type.file.isAssociatedData(path), false)\n })\n })\n\n it('should return true for associated data directories and any files within', function () {\n var goodFilenames = [\n '/code/test-script.py',\n '/derivatives/sub-01_QA.pdf',\n '/sourcedata/sub-01_ses-01_bold.dcm',\n '/stimuli/text.pdf',\n ]\n\n goodFilenames.forEach(function (path) {\n assert(utils.type.file.isAssociatedData(path))\n })\n })\n})\n\ndescribe('utils.type.file.isStimuliData', function () {\n it('should return false for unknown root directories', function () {\n var badFilenames = ['/images/picture.jpeg', '/temporary/test.json']\n\n badFilenames.forEach(function (path) {\n assert.equal(utils.type.file.isStimuliData(path), false)\n })\n })\n\n it('should return true for stimuli data directories and any files within', function () {\n var goodFilenames = ['/stimuli/sub-01/mov.avi', '/stimuli/text.pdf']\n\n goodFilenames.forEach(function (path) {\n assert(utils.type.file.isStimuliData(path))\n })\n })\n})\n\ndescribe('utils.type.getPathValues', function () {\n it('should return the correct path values from a valid file path', function () {\n 
assert.equal(\n utils.type.getPathValues(\n '/sub-22/ses-1/func/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n ).sub,\n 22,\n )\n assert.equal(\n utils.type.getPathValues(\n '/sub-22/ses-1/func/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n ).ses,\n 1,\n )\n assert.equal(\n utils.type.getPathValues(\n '/sub-22/func/sub-22_task-rest_acq-prefrontal_physio.tsv.gz',\n ).sub,\n 22,\n )\n assert.equal(\n utils.type.getPathValues(\n '/sub-22/func/sub-22_task-rest_acq-prefrontal_physio.tsv.gz',\n ).ses,\n null,\n )\n })\n})\n\ndescribe('utils.type.file.isPET', function () {\n const goodFilenames = [\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_pet.json',\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_pet.nii',\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_pet.nii.gz',\n '/sub-03/ses-01/pet/sub-02_ses-40_task-30_pet.json',\n '/sub-03/ses-01/pet/sub-02_ses-40_pet.nii',\n '/sub-03/ses-01/pet/sub-02_ses-40_pet.nii.gz',\n '/sub-03/pet/sub-02_pet.nii',\n '/sub-03/pet/sub-02_pet.nii.gz',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isPET('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isPET(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_pet+json',\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_pet.json.gz',\n '/sub-1/ses-1/pet/sub-1ses-1_task-1_trc-1_rec-1_run-1_pet.nii',\n 'sub-1/ses-1/pet/sub-1ses-1_task-1_trc-1_rec-1_run-1_pet.nii',\n '/sub-1/ses-1/pet/sub-1/ses-1_task-1_trc-1_rec-1_run-q_pet.csv',\n '/sub-1/ses-1/pet/sub-1/ses-1_task-1_trc-1_rec-1_run-q_recording-1_pet.nii',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isPET('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isPET(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isPETBlood', function () {\n const goodFilenames = [\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_recording-1_blood.json',\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_recording-1_blood.tsv',\n '/sub-03/ses-01/pet/sub-02_ses-40_task-30_recording-manual_blood.json',\n '/sub-03/ses-01/pet/sub-02_ses-40_recording-manual_blood.tsv',\n '/sub-03/pet/sub-02_recording-manual_blood.tsv',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isPETBlood('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isPETBlood(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-1/ses-1/pet/sub-1_ses-1_task-1_trc-1_rec-1_run-1_recording-1_blood+json',\n '/sub-1/ses-1/pet/sub-1ses-1_task-1_trc-1_rec-1_run-1_recording-1_blood.tsv',\n 'sub-1/ses-1/pet/sub-1ses-1_task-1_trc-1_rec-1_run-1_recording-1_blood.tsv',\n '/sub-1/ses-1/pet/sub-1/ses-1_task-1_trc-1_rec-1_run-q_recording-1_blood.csv',\n '/sub-1/ses-1/pet/sub-1/ses-1_task-1_trc-1_rec-1_run-q_recording-1_pet.tsv',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isPETBlood('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isPETBlood(path), false)\n isdone()\n })\n })\n})\n\ndescribe('utils.type.file.isMOTION', function () {\n const goodFilenames = [\n '/sub-01/motion/sub-01_task-rest_tracksys-unity_run-01_motion.tsv',\n '/sub-01/ses-walk/motion/sub-01_ses-walk_task-visual_tracksys-unity_motion.tsv',\n '/sub-01/ses-walk/motion/sub-01_ses-walk_task-visual_tracksys-unity_motion.json',\n '/sub-01/ses-walk/motion/sub-01_ses-walk_task-visual_tracksys-unity_channels.tsv',\n 
'/sub-01/ses-desktop/motion/sub-01_ses-desktop_task-rest_tracksys-unity_run-01_events.tsv',\n '/sub-01/ses-desktop/motion/sub-01_ses-desktop_task-rest_events.tsv',\n ]\n\n goodFilenames.forEach(function (path) {\n it(\"isMOTION('\" + path + \"') === true\", function (isdone) {\n assert.equal(utils.type.file.isMOTION(path), true)\n isdone()\n })\n })\n\n const badFilenames = [\n '/sub-01/motion/sub-01_ses-001_tracksys-unity_task-rest_run-01_motion.json',\n '/sub-01/ses-001/motion/sub-12_ses-001_task-rest_run-01_motion.tsv',\n '/sub-01/ses-walk/motion/sub-01_ses-walk_task-visual_channels.tsv',\n '/sub-01/ses-001/motion/sub-01_ses-001_run-01_motion.tsv',\n '/sub-01/motion/sub-01_task-walk_run-01_motion.tsv',\n ]\n\n badFilenames.forEach(function (path) {\n it(\"isMOTION('\" + path + \"') === false\", function (isdone) {\n assert.equal(utils.type.file.isMOTION(path), false)\n isdone()\n })\n })\n})\n\ndescribe('BIDS.subIDsesIDmismatchtest', function () {\n it(\"should return if sub and ses doesn't match\", function () {\n const files = {\n 0: {\n name: 'sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n path: 'tests/data/BIDS-examples-1.0.0-rc3u5/ds001/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n relativePath:\n 'ds001/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n },\n 1: {\n name: '/sub-22/ses-1/func/sub-23_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n path: 'tests/data/BIDS-examples-1.0.0-rc3u5/ds001/sub-22/ses-1/func/sub-23_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n relativePath:\n 'ds001/sub-22/ses-1/func/sub-23_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n },\n 2: {\n name: '/sub-22/ses-1/func/sub-22_ses-2_task-rest_acq-prefrontal_physio.tsv.gz',\n path: 'tests/data/BIDS-examples-1.0.0-rc3u5/ds001/sub-22/ses-1/func/sub-22_ses-2_task-rest_acq-prefrontal_physio.tsv.gz',\n relativePath:\n '/sub-22/ses-1/func/sub-22_ses-2_task-rest_acq-prefrontal_physio.tsv.gz',\n },\n 3: {\n name: '/sub-25/ses-2/func/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n path: 'tests/data/BIDS-examples-1.0.0-rc3u5/ds001/sub-25/ses-2/func/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n relativePath:\n 'ds001//sub-25/ses-2/func/sub-22_ses-1_task-rest_acq-prefrontal_physio.tsv.gz',\n },\n }\n const issues = BIDS.subIDsesIDmismatchtest(files)\n const code64_seen = issues.some((issue) => issue.code == '64')\n const code65_seen = issues.some((issue) => issue.code == '65')\n assert(code64_seen)\n assert(code65_seen)\n })\n})\n" }, { "alpha_fraction": 0.5551878809928894, "alphanum_fraction": 0.5762231349945068, "avg_line_length": 32.31496047973633, "blob_id": "8939e41a46ae86179ca54768d7194bc0d7d252f9", "content_id": "347723c5c8577c64f771b731a8ffb9df1eda11ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4231, "license_type": "permissive", "max_line_length": 101, "num_lines": 127, "path": "/bids-validator/validators/bids/__tests__/checkDatasetDescription.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport checkDatasetDescription from '../checkDatasetDescription'\n\ndescribe('checkDatasetDescription', () => {\n describe('checkNameAndAuthorsFields', () => {\n it('returns no issues with valid Name and Authors field', () => {\n const validJsonContentsDict = {\n '/dataset_description.json': {\n Name: 'Electric Boots',\n Authors: ['Benny', 'the Jets'],\n },\n }\n const issues = checkDatasetDescription(validJsonContentsDict)\n 
assert.lengthOf(issues, 0)\n })\n })\n describe('checkNameField', () => {\n it('returns code 115 when Name is empty', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {\n Name: '',\n },\n }\n const issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 115) > -1,\n 'issues include a code 115',\n )\n })\n it('returns code 115 when name only contains whitespace', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {\n Name: ' \\t\\r\\n\\f\\v\\u2003',\n },\n }\n const issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 115) > -1,\n 'issues include a code 115',\n )\n })\n it('returns no issues with one non-whitespace character', () => {\n const validJsonContentsDict = {\n '/dataset_description.json': {\n Name: ' \\u2708 ',\n Authors: ['Benny', 'the Jets'],\n },\n }\n const issues = checkDatasetDescription(validJsonContentsDict)\n assert.lengthOf(issues, 0)\n })\n })\n describe('checkAuthorField', () => {\n it('returns code 102 when there is only one author present', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {\n Authors: ['Benny'],\n },\n }\n const issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 102) > -1,\n 'issues include a code 102',\n )\n })\n it('returns code 103 when there an author has more than one comma', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {\n Authors: ['Benny, and the, Jets'],\n },\n }\n const issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 103) > -1,\n 'issues include a code 103',\n )\n })\n it('returns code 113 when there are no Authors', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {\n Authors: [],\n },\n }\n let issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 113) > -1,\n 'issues include a code 113',\n )\n\n const invalidJsonContentsDict2 = {\n '/dataset_description.json': {},\n }\n issues = checkDatasetDescription(invalidJsonContentsDict2)\n assert(\n issues.findIndex((issue) => issue.code === 113) > -1,\n 'issues include a code 113',\n )\n })\n })\n describe('checkGeneticDatabaseField', () => {\n it('returns code 128 when there is no Genetics.Dataset with a genetic_info.json present', () => {\n const invalidJsonContentsDict = {\n '/dataset_description.json': {},\n '/genetic_info.json': {},\n }\n let issues = checkDatasetDescription(invalidJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 128) > -1,\n 'issues include a code 128',\n )\n })\n it('does not return code 128 when GeneticDataset field and genetic_info.json present', () => {\n const validJsonContentsDict = {\n '/dataset_description.json': {\n Authors: ['Benny', 'the Jets'],\n Genetics: { Dataset: 'GeneticGeneticDataset' },\n },\n '/genetic_info.json': {},\n }\n let issues = checkDatasetDescription(validJsonContentsDict)\n assert(\n issues.findIndex((issue) => issue.code === 128) === -1,\n 'issues does not include a code 128',\n )\n })\n })\n})\n" }, { "alpha_fraction": 0.6420079469680786, "alphanum_fraction": 0.648612916469574, "avg_line_length": 28.115385055541992, "blob_id": "70692495ed2841276a722a233b12da903be0d6a6", "content_id": "d4059be43a8edbb0175a56d2a73da1fab12190a2", "detected_licenses": [ "MIT" ], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 1514, "license_type": "permissive", "max_line_length": 79, "num_lines": 52, "path": "/tools/prep_zenodo.py", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport git\nimport json\nfrom subprocess import run, PIPE\nfrom pathlib import Path\n\n\ndef decommify(name):\n return ' '.join(name.split(', ')[::-1])\n\nblacklist = {\n 'dependabot[bot]',\n }\n\ngit_root = Path(git.Repo('.', search_parent_directories=True).working_dir)\nzenodo_file = git_root / '.zenodo.json'\n\nzenodo = json.loads(zenodo_file.read_text()) if zenodo_file.exists() else {}\n\norig_creators = zenodo.get('creators', [])\ncreator_map = {decommify(creator['name']): creator\n for creator in orig_creators}\n\nshortlog = run(['git', 'shortlog', '-ns'], stdout=PIPE)\ncounts = [line.split('\\t', 1)[::-1]\n for line in shortlog.stdout.decode().split('\\n') if line]\n\ncommit_counts = {}\nfor committer, commits in counts:\n commit_counts[committer] = commit_counts.get(committer, 0) + int(commits)\n\n# Stable sort:\n# Number of commits in reverse order\n# Ties broken by alphabetical order of first name\ncommitters = [committer\n for committer, _ in sorted(commit_counts.items(),\n key=lambda x: (-x[1], x[0]))\n if committer not in blacklist]\n\n# Tal to the top\nfirst_author = 'Ross Blair'\nif committers[0] != first_author:\n committers.remove(first_author)\n committers.insert(0, first_author)\n\ncreators = [\n creator_map.get(committer, {'name': committer})\n for committer in committers\n ]\n\nzenodo['creators'] = creators\nzenodo_file.write_text(json.dumps(zenodo, ensure_ascii=False, indent=2) + '\\n')\n" }, { "alpha_fraction": 0.636734664440155, "alphanum_fraction": 0.6632652878761292, "avg_line_length": 34, "blob_id": "cf2658b62e38daf917802a9ac971d31aef77ef5e", "content_id": "22cad0e813b6adee376629c02439f14df589d20b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 490, "license_type": "permissive", "max_line_length": 62, "num_lines": 14, "path": "/bids-validator/validators/tsv/__tests__/checkAge89.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { checkAge89 } from '../checkAge89'\n\ndescribe('checkAge89()', () => {\n it('returns evidence in the expected string format', () => {\n // Evidence should always be a human-readable string\n const issues = []\n const exampleParticipants = [['age'], [90]]\n const mockFile = {}\n checkAge89(exampleParticipants, mockFile, issues)\n expect(issues).toHaveLength(1)\n expect(issues[0]).toHaveProperty('evidence')\n expect(typeof issues[0].evidence).toBe('string')\n })\n})\n" }, { "alpha_fraction": 0.6188371181488037, "alphanum_fraction": 0.6293777823448181, "avg_line_length": 28.70707130432129, "blob_id": "2ee3560407f5a6ec885a370b9b90860d8d041ac5", "content_id": "ec0243e1dd3cc7f0f3b24d412832d98e8586d2e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2941, "license_type": "permissive", "max_line_length": 89, "num_lines": 99, "path": "/bids-validator/tests/cli.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import cli from '../cli'\nimport path from 'path'\n\nconst dir = process.cwd()\nconst data_dir = path.join(dir, 'bids-validator', 'tests', 'data')\nconst data_with_errors = path.join(data_dir, 'empty_files')\nconst data_without_errors = path.join(data_dir, 
'valid_dataset')\n\nconst colorRegEx = new RegExp(\n // eslint-disable-next-line no-control-regex\n '[\\u001b\\u009b][[()#;?]*(?:[0-9]{1,4}(?:;[0-9]{0,4})*)?[0-9A-ORZcf-nqry=><]',\n)\n\nexpect.extend({\n toBeJSON: function (received) {\n try {\n JSON.parse(received)\n return {\n pass: true,\n }\n } catch (err) {\n return {\n pass: false,\n }\n }\n },\n})\n\nlet mockStdout\nlet mockStderr\nlet mockExit\nlet mockConsoleError\n\ndescribe('CLI', () => {\n beforeEach(() => {\n // bids-validator uses these\n mockStdout = jest\n .spyOn(process.stdout, 'write')\n .mockImplementation(() => true)\n mockStderr = jest\n .spyOn(process.stderr, 'write')\n .mockImplementation(() => true)\n // Yargs uses these\n mockExit = jest.spyOn(process, 'exit').mockImplementation(() => true)\n mockConsoleError = jest\n .spyOn(console, 'error')\n .mockImplementation(() => true)\n })\n afterEach(() => {\n mockStdout.mockRestore()\n mockStderr.mockRestore()\n mockExit.mockRestore()\n mockConsoleError.mockRestore()\n })\n it('should display usage hints when no arguments / options are provided', async () => {\n try {\n await cli(' ')\n } catch (code) {\n expect(code).toEqual(2)\n // 'jest' is the process name here but usually it is 'bids-validator'\n expect(mockConsoleError.mock.calls[0][0]).toEqual(\n expect.stringContaining('<dataset_directory> [options]'),\n )\n }\n })\n\n it('should accept a directory as the first argument without error', async () => {\n await expect(cli(data_without_errors)).resolves.toEqual(0)\n })\n\n it('without errors should exit with code 0', async () => {\n await expect(cli(`${data_without_errors} --json`)).resolves.toEqual(0)\n })\n\n it('with errors should not exit with code 0', async () => {\n await expect(cli(`${data_with_errors}`)).rejects.toEqual(1)\n })\n\n it('with errors should not exit with code 0 with --json argument', async () => {\n await expect(cli(`${data_with_errors} --json`)).rejects.toEqual(1)\n })\n\n it('should print valid json when the --json argument is provided', async () => {\n await expect(cli(`${data_without_errors} --json`)).resolves.toEqual(0)\n expect(mockStdout).toBeCalledWith(expect.toBeJSON())\n })\n\n it('should print with colors by default', async () => {\n await cli(`${data_without_errors}`)\n expect(mockStdout.mock.calls[0][0]).toMatch(colorRegEx)\n })\n\n it('should print without colors when NO_COLOR env set', async () => {\n process.env.NO_COLOR = 'any value'\n await cli(`${data_without_errors}`)\n expect(mockStdout.mock.calls[0][0]).not.toMatch(colorRegEx)\n delete process.env.NO_COLOR\n })\n})\n" }, { "alpha_fraction": 0.6400304436683655, "alphanum_fraction": 0.6484017968177795, "avg_line_length": 32.26582336425781, "blob_id": "e4dce15ef781c74a64d9f2d5af9c459cbe46e6d4", "content_id": "87f730a881b8470dc0da4ca5bb8c8f21c7039c13", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2628, "license_type": "permissive", "max_line_length": 76, "num_lines": 79, "path": "/bids-validator/validators/bids/checkDatasetDescription.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nconst checkDatasetDescription = (jsonContentsDict) => {\n let issues = []\n const jsonFilePaths = Object.keys(jsonContentsDict)\n const hasDatasetDescription = jsonFilePaths.some((path) => {\n return path == '/dataset_description.json'\n })\n const hasGeneticInfo = jsonFilePaths.some((path) => {\n return path === '/genetic_info.json'\n })\n\n 
if (!hasDatasetDescription) {\n issues.push(new Issue({ code: 57 }))\n } else {\n const datasetDescription = jsonContentsDict['/dataset_description.json']\n\n // check to ensure that the dataset description fields are\n // properly formatted\n issues = issues.concat(checkNameField(datasetDescription.Name))\n issues = issues.concat(checkAuthorField(datasetDescription.Authors))\n\n // if genetic info json present ensure mandatory GeneticDataset present\n if (\n hasGeneticInfo &&\n !(\n 'Genetics' in datasetDescription &&\n 'Dataset' in datasetDescription.Genetics\n )\n ) {\n issues.push(new Issue({ code: 128 }))\n }\n }\n return issues\n}\n\nconst checkNameField = (name) => {\n const issues = []\n // missing name will be caught by validation (later)\n if (name !== undefined) {\n const nonws = /\\S/\n if (!name.match(nonws)) {\n issues.push(new Issue({ code: 115 }))\n }\n }\n return issues\n}\n\nconst checkAuthorField = (authors) => {\n const issues = []\n // because this test happens before schema validation,\n // we have to make sure that authors is an array\n if (authors && typeof authors == 'object' && authors.length) {\n // if any author has more than one comma, throw an error\n authors.forEach((author) => {\n if (('' + author).split(',').length > 2) {\n issues.push(new Issue({ code: 103, evidence: author }))\n }\n })\n // if authors is length 1, we want a warning for a single comma\n // and an error for multiple commas\n if (authors.length == 1) {\n const author = authors[0]\n // check the number of commas in the single author field\n if (typeof author == 'string' && author.split(',').length <= 2) {\n // if there is one or less comma in the author field,\n // we suspect that the curator has not listed everyone involved\n issues.push(new Issue({ code: 102, evidence: author }))\n }\n }\n } else {\n // if there are no authors,\n // warn user that errors could occur during doi minting\n // and that snapshots on OpenNeuro will not be allowed\n issues.push(new Issue({ code: 113, evidence: JSON.stringify(authors) }))\n }\n return issues\n}\nexport default checkDatasetDescription\n" }, { "alpha_fraction": 0.8072916865348816, "alphanum_fraction": 0.8072916865348816, "avg_line_length": 24.600000381469727, "blob_id": "b9a6197e7d382dc926e9f0aa01a8fa83179403dd", "content_id": "db07e1f667798bfaba8c96c864b41701c8ecc85d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 384, "license_type": "permissive", "max_line_length": 67, "num_lines": 15, "path": "/bids-validator/validators/nifti/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import nifti from './nii'\nimport duplicateFiles from './duplicateFiles'\nimport fieldmapWithoutMagnitude from './fieldmapWithoutMagnitude'\nimport phasediffWithoutMagnitude from './phasediffWithoutMagnitude'\nimport validate from './validate'\n\nexport const NIFTI = nifti\n\nexport default {\n nifti,\n duplicateFiles,\n fieldmapWithoutMagnitude,\n phasediffWithoutMagnitude,\n validate,\n}\n" }, { "alpha_fraction": 0.5588235259056091, "alphanum_fraction": 0.5964052081108093, "avg_line_length": 31.210525512695312, "blob_id": "b2633b632ebf389ae9298b7f4b5056fa008faec2", "content_id": "91507be387b33ea8840166097f1a9cf69c4fa546", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1224, "license_type": "permissive", "max_line_length": 84, "num_lines": 38, "path": 
"/bids-validator/utils/__tests__/bids_files.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport { checkSidecarForDatafiles } from '../bids_files.js'\n\ndescribe('bids_files', () => {\n describe('checkSidecarForDatafiles()', () => {\n it('matches .tsv datafile to sidecar', () => {\n const file = {\n relativePath:\n 'ds001/sub-02/func/sub-02_task-balloonanalogrisktask_run-01_events.json',\n }\n const fileList = {\n 1: {\n name: 'sub-02_task-balloonanalogrisktask_run-01_events.tsv',\n relativePath:\n 'ds001/sub-02/func/sub-02_task-balloonanalogrisktask_run-01_events.tsv',\n },\n }\n const match = checkSidecarForDatafiles(file, fileList)\n assert.isTrue(match)\n })\n\n it('does not match invalid datafile formats', () => {\n const file = {\n relativePath:\n 'ds001/sub-02/func/sub-02_task-balloonanalogrisktask_run-01_events.json',\n }\n const fileList = {\n 1: {\n name: 'sub-02_task-balloonanalogrisktask_run-01_events.tsv',\n relativePath:\n 'ds001/sub-02/func/sub-02_task-balloonanalogrisktask_run-01_events.tsn',\n },\n }\n const match = checkSidecarForDatafiles(file, fileList)\n assert.isFalse(match)\n })\n })\n})\n" }, { "alpha_fraction": 0.7112299203872681, "alphanum_fraction": 0.7112299203872681, "avg_line_length": 30.16666603088379, "blob_id": "4f4a16291046b1b1eb76a87890cbf5a96dfd5ab0", "content_id": "cf39560b570a3899849f3bd7a4b28084a1ccf481", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 187, "license_type": "permissive", "max_line_length": 48, "num_lines": 6, "path": "/bids-validator/bids_validator/__init__.py", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "\"\"\"BIDS validator common Python package.\"\"\"\nfrom .bids_validator import BIDSValidator\n__all__ = ['BIDSValidator']\n\nfrom . import _version\n__version__ = _version.get_versions()['version']\n" }, { "alpha_fraction": 0.5471698045730591, "alphanum_fraction": 0.5471698045730591, "avg_line_length": 25.5, "blob_id": "6b2129b4d88e9e5abeed90d22638677cadf01db7", "content_id": "2204c1f8e33c8c070e4941933a04919b1eeaa3ff", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 318, "license_type": "permissive", "max_line_length": 60, "num_lines": 12, "path": "/bids-validator/src/utils/memoize.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export const memoize = <T>(\n fn: (...args: any[]) => T,\n): ((...args: any[]) => T) => {\n const cache = new Map()\n const cached = function (this: any, val: T) {\n return cache.has(val)\n ? cache.get(val)\n : cache.set(val, fn.call(this, val)) && cache.get(val)\n }\n cached.cache = cache\n return cached\n}\n" }, { "alpha_fraction": 0.7023430466651917, "alphanum_fraction": 0.7066820859909058, "avg_line_length": 37.84269714355469, "blob_id": "42bc4d4ff237d80a350bb93ea71ef7e77c7a7442", "content_id": "d18579343af06a4d6475846eb8afbe35fb29d4a8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3457, "license_type": "permissive", "max_line_length": 145, "num_lines": 89, "path": "/bids-validator/utils/files/potentialLocations.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const potentialLocations = (path) => {\n //add a '/' at the beginning of the path if it doesn't exist yet\n path = path.startsWith('/') ? 
path : '/' + path\n const splitPath = path.split('/')\n const filename = splitPath[splitPath.length - 1] // filename path component\n const pathComponents = splitPath.splice(0, splitPath.length - 1) // all path components before\n\n // split the filename into separate components\n const filenameComponents = filename.split('_')\n\n // create components object consisting of path + filename component lists\n const components = {\n path: pathComponents,\n filename: filenameComponents,\n }\n\n // generate relevant paths and put into closest -> root order\n const potentials = potentialPaths(components)\n if (potentials.indexOf(path) < 0) {\n return [path].concat(potentials).reverse()\n } else {\n return potentials\n }\n}\n\nconst potentialPaths = (components) => {\n let filenameComponents = components.filename // get the underscore separated file components\n let pathComponents = components.path // get the path components before file\n const fileIndex = filenameComponents.length - 1 // index of the filename in file components\n const file = filenameComponents[fileIndex] // filename (events.tsv, bold.json, etc)\n const informationalFileComponents = filenameComponents.slice(0, fileIndex) // all non-filename file path components (ses-*, sub-*, task-*, etc)\n\n // filter filename components that are allowed only in a lower directory\n // eg if we are root level we will not want sub-* included in the possible\n // paths for this level. Also we do not want to include run in that list.\n const nonPathSpecificFileComponents = informationalFileComponents.filter(\n (component) => pathComponents.indexOf(component) < 0,\n )\n\n // loop through all the directory levels - root, sub, (ses), (datatype)\n let paths = []\n pathComponents.map((component, i) => {\n const activeDirectoryComponents = pathComponents.slice(0, i + 1) // the directory components in the current working level\n const directoryString = activeDirectoryComponents.join('/') // path of active directory\n\n const prefixComponents = informationalFileComponents.filter(\n (component) => activeDirectoryComponents.indexOf(component) > -1,\n )\n\n const prefix = prefixComponents.join('_')\n for (\n let j = 0;\n j < Math.pow(2, nonPathSpecificFileComponents.length);\n j++\n ) {\n const filename = nonPathSpecificFileComponents\n .filter((value, index) => j & (1 << index))\n .concat([file])\n .join('_')\n\n // join directory + filepath strings together to get entire path\n paths.push(constructFileName(directoryString, filename, prefix))\n }\n })\n\n // There is an exception to the inheritance principle when it comes\n // to bold data .json sidecars - the potential locations *must* include\n // the task-<taskname> keyword.\n if (filenameComponents.indexOf('bold.json') > -1) {\n paths = removePathsWithoutTasknames(paths)\n }\n\n return paths\n}\n\nconst constructFileName = (directoryString, filename, prefix) => {\n // join the prefix + filename if prefix exists\n const filePathString = prefix ? 
[prefix, filename].join('_') : filename\n const newPath = directoryString + '/' + filePathString\n return newPath\n}\n\nconst removePathsWithoutTasknames = (paths) => {\n return paths.filter((path) => {\n return path.indexOf('task') > -1\n })\n}\n\nexport default potentialLocations\n" }, { "alpha_fraction": 0.5815768837928772, "alphanum_fraction": 0.6034348011016846, "avg_line_length": 26.84782600402832, "blob_id": "0751d65eaf165703bfb7283de943ae4f4fac07b6", "content_id": "63e37200bbda8396ad79ac44c8ec7106abf17a3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1281, "license_type": "permissive", "max_line_length": 67, "num_lines": 46, "path": "/bids-validator/utils/__tests__/filenamesOnly.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { validateFilenames } from '../filenamesOnly.js'\n\ndescribe('test filenames mode', () => {\n beforeEach(() => {\n // eslint-disable-next-line\n console.log = jest.fn()\n })\n it('throws an error when obviously non-BIDS input', async () => {\n async function* badData() {\n yield '0001'\n yield 'nope'\n yield 'not-bids'\n yield 'data'\n }\n const res = await validateFilenames(badData())\n expect(res).toBe(false)\n })\n it('passes validation with a simple dataset', async () => {\n async function* goodData() {\n yield '0001'\n yield 'CHANGES'\n yield 'dataset_description.json'\n yield 'participants.tsv'\n yield 'README'\n yield 'sub-01/anat/sub-01_T1w.nii.gz'\n yield 'T1w.json'\n }\n const res = await validateFilenames(goodData())\n expect(res).toBe(true)\n })\n it('passes validation with .bidsignore', async () => {\n async function* goodData() {\n yield 'sub-02/*'\n yield '0001'\n yield 'CHANGES'\n yield 'dataset_description.json'\n yield 'participants.tsv'\n yield 'README'\n yield 'sub-01/anat/sub-01_T1w.nii.gz'\n yield 'T1w.json'\n yield 'sub-02/not-bids-file.txt'\n }\n const res = await validateFilenames(goodData())\n expect(res).toBe(true)\n })\n})\n" }, { "alpha_fraction": 0.5264957547187805, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 23.375, "blob_id": "c5922aff0c92195c101a96f57bf6ae241fdb3e6c", "content_id": "425aa870e6b8c3e8e0301a1d72f78b7beaae7188", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 585, "license_type": "permissive", "max_line_length": 61, "num_lines": 24, "path": "/bids-validator/utils/summary/__tests__/collectDatatype.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import {\n ds001421,\n ds001734,\n ds003400,\n} from '../../../tests/data/collectModalities-data'\nimport collectDatatype from '../collectDataTypes'\n\ndescribe('collectDatatype()', () => {\n it('includes types such as T1w', () => {\n expect(collectDatatype(ds001734)).toEqual([\n 'magnitude1',\n 'magnitude2',\n 'phasediff',\n 'T1w',\n 'sbref',\n 'bold',\n 'events',\n ])\n expect(collectDatatype(ds001421)).toEqual(['pet', 'T1w'])\n })\n it('does not include T1w when missing', () => {\n expect(collectDatatype(ds003400)).not.toContain('T1w')\n })\n})\n" }, { "alpha_fraction": 0.5993091464042664, "alphanum_fraction": 0.6200345158576965, "avg_line_length": 23.125, "blob_id": "02b2e692796546d57b00d49eb1b5440580c450f8", "content_id": "c3490f04a4bdfcf2b99d9c8b9393d43a52b21577", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 579, "license_type": "permissive", 
"max_line_length": 58, "num_lines": 24, "path": "/bids-validator/bin/bids-validator", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "#!/usr/bin/env node\nfunction entry(cli) {\n cli(process.argv.slice(2)).catch(code => {\n process.exit(code)\n })\n}\n\ntry {\n // Test if there's a development tree to run\n require.resolve('../cli.js')\n process.env.ESBUILD_MAX_BUFFER = 64 * 1024 * 1024\n // For dev, use esbuild-runner\n require('esbuild-runner/register')\n const { default: cli } = require('../cli.js')\n entry(cli)\n} catch (err) {\n if (err.code === 'MODULE_NOT_FOUND') {\n const { default: cli } = require('bids-validator/cli')\n entry(cli)\n } else {\n console.log(err)\n process.exitCode = 1\n }\n}\n" }, { "alpha_fraction": 0.5498511791229248, "alphanum_fraction": 0.5520833134651184, "avg_line_length": 21.032787322998047, "blob_id": "8ae202621c125c01e41c9d51f031f96e9003a474", "content_id": "ad3785ac88276e4084ee5a49dc78f3a91c6a898b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1344, "license_type": "permissive", "max_line_length": 64, "num_lines": 61, "path": "/bids-validator/utils/json.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import Issue from './issues'\nimport { JSHINT } from 'jshint'\n\n/**\n * Similar to native JSON.parse but returns a promise and\n * runs jshint for more thorough error reporting\n */\nfunction parse(file, contents) {\n return new Promise((resolve) => {\n let jsObj\n let err\n try {\n jsObj = JSON.parse(contents)\n } catch (exception) {\n err = exception\n } finally {\n if (err) {\n jshint(file, contents, function (issues) {\n resolve({ issues, parsed: null })\n })\n } else {\n resolve({ issues: [], parsed: jsObj })\n }\n }\n })\n}\n\n/**\n * JSHint\n *\n * Checks known invalid JSON file\n * content in order to produce a\n * verbose error message.\n */\nfunction jshint(file, contents, callback) {\n var issues = []\n if (!JSHINT(contents)) {\n var out = JSHINT.data()\n for (var i = 0; out.errors.length > i; ++i) {\n var error = out.errors[i]\n if (error) {\n issues.push(\n new Issue({\n code: 27,\n file: file,\n line: error.line ? error.line : null,\n character: error.character ? error.character : null,\n reason: error.reason ? error.reason : null,\n evidence: error.evidence ? 
error.evidence : null,\n }),\n )\n }\n }\n }\n callback(issues)\n}\n\nexport default {\n parse,\n jshint,\n}\n" }, { "alpha_fraction": 0.5809701681137085, "alphanum_fraction": 0.6026119589805603, "avg_line_length": 26.346939086914062, "blob_id": "3c421e5300726382209b3103875849de5d6c582b", "content_id": "e203454c5bfb19ceee1d3cdff18322fa297c3764", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2680, "license_type": "permissive", "max_line_length": 91, "num_lines": 98, "path": "/bids-validator/validators/microscopy/__tests__/validate.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import path from 'path'\n\nimport validate from '../validate'\n\nconst dataDir = path.join(__dirname, '/data')\n\nconst jsonContent = {\n Manufacturer: 'Miltenyi Biotec',\n ManufacturersModelName: 'UltraMicroscope II',\n BodyPart: 'CSPINE',\n SampleEnvironment: 'ex vivo',\n SampleFixation: '4% paraformaldehyde, 2% glutaraldehyde',\n SampleStaining: 'Luxol fast blue',\n PixelSize: [1, 1, 1],\n PixelSizeUnits: 'um',\n Immersion: 'Oil',\n NumericalAperture: 1.4,\n Magnification: 40,\n ChunkTransformationMatrix: [\n [1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1],\n ],\n ChunkTransformationMatrixAxis: ['X', 'Y', 'Z'],\n}\n\ndescribe('validate', () => {\n it('returns error 227 with extension/id mismatch', () => {\n const fileName = 'btif_id.ome.tif'\n const files = [\n {\n name: fileName,\n relativePath: `/bids-validator/validators/microscopy/__tests__/data/${fileName}`,\n path: path.join(dataDir, fileName),\n },\n ]\n\n expect.assertions(3)\n return validate(files, {}).then((issues) => {\n expect(issues.length).toBe(2)\n expect(issues[0].code).toBe(227)\n expect(issues[1].code).toBe(226)\n })\n })\n\n it('returns error 227 with incorrect id in magic number', () => {\n const fileName = 'invalid_id.ome.tif'\n const files = [\n {\n name: fileName,\n relativePath: `/bids-validator/validators/microscopy/__tests__/data/${fileName}`,\n path: path.join(dataDir, fileName),\n },\n ]\n expect.assertions(2)\n return validate(files, {}).then((issues) => {\n expect(issues.length).toBe(1)\n expect(issues[0].code).toBe(227)\n })\n })\n\n it('returns error 227 with tif id and btf extension', () => {\n const fileName = 'tif_id.ome.btf'\n const files = [\n {\n name: fileName,\n relativePath: `/bids-validator/validators/microscopy/__tests__/data/${fileName}`,\n path: path.join(dataDir, fileName),\n },\n ]\n\n expect.assertions(2)\n return validate(files, {}).then((issues) => {\n expect(issues.length).toBe(1)\n expect(issues[0].code).toBe(227)\n })\n })\n\n it('validates with valid data', () => {\n const fileName = 'valid.ome.tif'\n const relativePath = `/bids-validator/validators/microscopy/__tests__/data/${fileName}`\n const files = [\n {\n name: fileName,\n relativePath: relativePath,\n path: path.join(dataDir, fileName),\n },\n ]\n const jsonContentDict = {}\n jsonContentDict[relativePath.replace('.ome.tif', '.json')] = jsonContent\n\n expect.assertions(1)\n return validate(files, jsonContentDict).then((issues) => {\n expect(issues.length).toBe(0)\n })\n })\n})\n" }, { "alpha_fraction": 0.7456140518188477, "alphanum_fraction": 0.7456140518188477, "avg_line_length": 16.8125, "blob_id": "d24ed59723bb87896b74bf04ba2100339f7acad2", "content_id": "cb2e4e53eb7d9296d4887ed541f201f6fc8ae99f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 570, 
"license_type": "permissive", "max_line_length": 57, "num_lines": 32, "path": "/bids-validator/validators/bids/index.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import start from './start'\nimport reset from './reset'\nimport quickTest from './quickTest'\nimport quickTestError from './quickTestError'\nimport fullTest from './fullTest'\nimport subIDsesIDmismatchTest from './subSesMismatchTest'\n\nconst options = {}\nconst issues = []\nconst subIDsesIDmismatchtest = subIDsesIDmismatchTest\n\nexport {\n options,\n issues,\n start,\n quickTestError,\n quickTest,\n fullTest,\n subIDsesIDmismatchtest,\n reset,\n}\n\nexport default {\n options,\n issues,\n start,\n quickTestError,\n quickTest,\n fullTest,\n subIDsesIDmismatchtest,\n reset,\n}\n" }, { "alpha_fraction": 0.6710894107818604, "alphanum_fraction": 0.6710894107818604, "avg_line_length": 25.518518447875977, "blob_id": "78dcce0dc7e122ce8a4756fc3265e1cfc7e02b60", "content_id": "6465ba468232591665117d58cd8d2968f043d29a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1432, "license_type": "permissive", "max_line_length": 79, "num_lines": 54, "path": "/bids-validator/bids_validator/test_bids_validator.py", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "\"\"\"Test BIDSValidator functionality.\n\ngit-annex and datalad are used to download a test data structure without the\nactual file contents.\n\n\"\"\"\nimport os\n\nimport pytest\nimport datalad.api\n\nfrom bids_validator import BIDSValidator\n\nHOME = os.path.expanduser('~')\n\nTEST_DATA_DICT = {\n 'eeg_matchingpennies': (\n 'https://gin.g-node.org/sappelhoff/eeg_matchingpennies'\n ),\n }\n\nEXCLUDE_KEYWORDS = ['git', 'datalad', 'sourcedata', 'bidsignore']\n\n\ndef _download_test_data(test_data_dict, dsname):\n \"\"\"Download test data using datalad.\"\"\"\n url = test_data_dict[dsname]\n dspath = os.path.join(HOME, dsname)\n datalad.api.clone(source=url, path=dspath)\n return dspath\n\n\ndef _gather_test_files(dspath, exclude_keywords):\n \"\"\"Get test files from dataset path, relative to dataset.\"\"\"\n files = []\n for r, _, f in os.walk(dspath):\n for file in f:\n fname = os.path.join(r, file)\n fname = fname.replace(dspath, '')\n if not any(keyword in fname for keyword in exclude_keywords):\n files.append(fname)\n\n return files\n\n\ndspath = _download_test_data(TEST_DATA_DICT, 'eeg_matchingpennies')\nfiles = _gather_test_files(dspath, EXCLUDE_KEYWORDS)\n\n\[email protected]('fname', files)\ndef test_is_bids(fname):\n \"\"\"Test that is_bids returns true for each file in a valid BIDS dataset.\"\"\"\n validator = BIDSValidator()\n assert validator.is_bids(fname)\n" }, { "alpha_fraction": 0.5724381804466248, "alphanum_fraction": 0.5759717226028442, "avg_line_length": 27.299999237060547, "blob_id": "05f2116af8e18be8ac242faed6d3aa4b925a7a43", "content_id": "a28e147e3f36aa015578f202246c7d4a826456d6", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 849, "license_type": "permissive", "max_line_length": 79, "num_lines": 30, "path": "/bids-validator/src/files/tsv.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/*\n * TSV\n * Module for parsing TSV\n */\nconst normalizeEOL = (str: string): string =>\n str.replace(/\\r\\n/g, '\\n').replace(/\\r/g, '\\n')\n// Typescript resolved `row && !/^\\s*$/.test(row)` as `string | boolean`\nconst isContentfulRow = 
(row: string): boolean => !!(row && !/^\\s*$/.test(row))\n\nexport function parseTSV(contents: string) {\n const columns: Record<string, string[]> = new Map()\n const rows: string[][] = normalizeEOL(contents)\n .split('\\n')\n .filter(isContentfulRow)\n .map((str) => str.split('\\t'))\n const headers = rows.length ? rows[0] : []\n\n headers.map((x) => {\n columns.set(x, [])\n })\n for (let i = 1; i < rows.length; i++) {\n for (let j = 0; j < headers.length; j++) {\n columns.get(headers[j]).push(rows[i][j])\n }\n }\n for (let [key, value] of columns) {\n columns[key] = value\n }\n return columns\n}\n" }, { "alpha_fraction": 0.6753246784210205, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 76, "blob_id": "c16bcdf3e4eaf6a0d3dc1bae9810c2a6d4e76f91", "content_id": "f4bce10fac938cba7869d6b6132d069038fdb7b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 77, "license_type": "permissive", "max_line_length": 76, "num_lines": 1, "path": "/bids-validator/src/deps/prettyBytes.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export { prettyBytes } from 'https://deno.land/x/[email protected]/mod.ts'\n" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.5517241358757019, "avg_line_length": 19.173913955688477, "blob_id": "93a8597c49482907e27fa20d464b3f1dc78fad5f", "content_id": "f9caa3037f452a792ad4987bb0ce68d1e6c0851c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 464, "license_type": "permissive", "max_line_length": 43, "num_lines": 23, "path": "/bids-validator/utils/files/readBuffer.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import isNode from '../isNode'\nimport fs from 'fs'\n\nconst readBuffer = (file) => {\n return new Promise((resolve, reject) => {\n if (isNode) {\n resolve(fs.readFileSync(file.path))\n } else {\n try {\n const reader = new FileReader()\n reader.onload = (event) => {\n resolve(event.target.result)\n }\n\n reader.readAsArrayBuffer(file)\n } catch (e) {\n reject(e)\n }\n }\n })\n}\n\nexport default readBuffer\n" }, { "alpha_fraction": 0.5421538352966309, "alphanum_fraction": 0.5895384550094604, "avg_line_length": 27.508771896362305, "blob_id": "8a600ecaa27d057f0013a35f8cfb7f23a0d74979", "content_id": "dd83b4329398771c4f87225b9b80fe9ee4e266d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1625, "license_type": "permissive", "max_line_length": 62, "num_lines": 57, "path": "/bids-validator/utils/summary/__tests__/collectModalities.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import {\n ds000247,\n ds001421,\n ds001734,\n ds002718,\n ds003400,\n} from '../../../tests/data/collectModalities-data'\nimport { collectModalities } from '../collectModalities'\n\ndescribe('collectModalities()', () => {\n it('returns correct values for a PET dataset', () => {\n expect(collectModalities(ds001421)).toEqual({\n primary: ['PET', 'MRI'],\n secondary: ['MRI_Structural'],\n })\n })\n it('returns correct values for an MRI dataset', () => {\n expect(collectModalities(ds001734)).toEqual({\n primary: ['MRI'],\n secondary: ['MRI_Functional', 'MRI_Structural'],\n })\n })\n it('returns correct values for an EEG dataset', () => {\n expect(collectModalities(ds002718)).toEqual({\n primary: ['EEG', 'MRI'],\n secondary: ['MRI_Structural'],\n })\n })\n 
it('returns correct values for an iEEG dataset', () => {\n expect(collectModalities(ds003400)).toEqual({\n primary: ['iEEG'],\n secondary: [],\n })\n })\n it('returns correct values for an MEG dataset', () => {\n expect(collectModalities(ds000247)).toEqual({\n primary: ['MEG', 'MRI'],\n secondary: ['MRI_Structural'],\n })\n })\n it('sorts other modalities ahead of MRI on ties', () => {\n const tied = [\n '/sub-01/ses-02/pet/sub-01_ses-02_pet.nii.gz',\n '/sub-01/ses-02/anat/sub-01_ses-02_T1w.nii',\n ]\n expect(collectModalities(tied)).toEqual({\n primary: ['PET', 'MRI'],\n secondary: ['MRI_Structural'],\n })\n })\n it('returns empty arrays when no matches are found', () => {\n expect(collectModalities([])).toEqual({\n primary: [],\n secondary: [],\n })\n })\n})\n" }, { "alpha_fraction": 0.5431226491928101, "alphanum_fraction": 0.5587360858917236, "avg_line_length": 34.394737243652344, "blob_id": "c5dffcb98b32f7acdac52a2422e5c88d5c45016f", "content_id": "fc929b03520c173f10649fee9ecc42c57a30d37a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2690, "license_type": "permissive", "max_line_length": 102, "num_lines": 76, "path": "/bids-validator/utils/files/__tests__/readDir-examples.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import readDir from '../readDir.js'\n\ndescribe('readDir.js - examples integration', () => {\n describe('readDir()', () => {\n it('returns expected files', async () => {\n await readDir('bids-validator/tests/data/bids-examples/ds002/').then(\n (files) => {\n const filenames = Object.values(files).map((f) => f.name)\n filenames.sort()\n expect(filenames).toHaveLength(246)\n expect(filenames[0]).toBe('CHANGES')\n expect(filenames[25]).toBe(\n 'sub-02_task-mixedeventrelatedprobe_run-01_bold.nii.gz',\n )\n expect(filenames[200]).toBe(\n 'sub-14_task-probabilisticclassification_run-02_events.tsv',\n )\n },\n )\n })\n it('correctly follows symlinks for subjects with followSymbolicLink: true', async () => {\n await readDir('bids-validator/tests/data/symlinked_subject', {\n ignoreSymlinks: false,\n }).then((files) => {\n expect(Object.keys(files)).toHaveLength(12)\n const filenames = Object.values(files).map((f) => f.name)\n filenames.sort()\n expect(filenames).toEqual([\n 'CHANGES',\n 'README',\n 'dataset_description.json',\n 'participants.tsv',\n 'sub-0-1_task-rhymejudgment_bold.nii.gz',\n 'sub-01_T1w.nii',\n 'sub-01_T1w.nii.gz',\n 'sub-01_task-rhyme-judgment_bold.nii.gz',\n 'sub-01_task-rhyme-judgment_events.tsv',\n 'sub-01_task-rhyme_judgment_bold.nii.gz',\n 'sub-01_task-rhyme_judgment_events.tsv',\n 'task-rhymejudgment_bold.json',\n ])\n })\n })\n it('correctly does not follow symlinks for subjects with followSymbolicLink: false', async () => {\n await readDir('bids-validator/tests/data/symlinked_subject', {\n ignoreSymlinks: true,\n }).then((files) => {\n expect(Object.keys(files)).toHaveLength(6)\n const filenames = Object.values(files).map((f) => f.name)\n filenames.sort()\n expect(filenames).toEqual([\n 'CHANGES',\n 'README',\n 'dataset_description.json',\n 'participants.tsv',\n 'sub-01',\n 'task-rhymejudgment_bold.json',\n ])\n })\n })\n it('returns file objects with the expected shape', async () => {\n await readDir('bids-validator/tests/data/symlinked_subject', {\n ignoreSymlinks: true,\n }).then((files) => {\n expect(Object.keys(files)).toHaveLength(6)\n Object.values(files).forEach((f) => {\n expect(Object.getOwnPropertyNames(f)).toEqual([\n 'name',\n 
'path',\n 'relativePath',\n ])\n })\n })\n })\n })\n})\n" }, { "alpha_fraction": 0.5657216310501099, "alphanum_fraction": 0.5811855792999268, "avg_line_length": 28.846153259277344, "blob_id": "9f653da0b123ce06c3aebbc5b7d7c16b574329c4", "content_id": "151f0b4289ca0afc1404c0ba70049123ed032140", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 776, "license_type": "permissive", "max_line_length": 66, "num_lines": 26, "path": "/bids-validator/src/files/ignore.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals } from '../deps/asserts.ts'\nimport { FileIgnoreRules } from './ignore.ts'\n\nDeno.test('Deno implementation of FileIgnoreRules', async (t) => {\n await t.step('handles basic .bidsignore rules', () => {\n const files = [\n '/sub-01/anat/sub-01_T1w.nii.gz',\n '/dataset_description.json',\n '/README',\n '/CHANGES',\n '/participants.tsv',\n '/.git/HEAD',\n '/sub-01/anat/non-bidsy-file.xyz',\n ]\n const rules = ['.git', '**/*.xyz']\n const ignore = new FileIgnoreRules(rules)\n const filtered = files.filter((path) => !ignore.test(path))\n assertEquals(filtered, [\n '/sub-01/anat/sub-01_T1w.nii.gz',\n '/dataset_description.json',\n '/README',\n '/CHANGES',\n '/participants.tsv',\n ])\n })\n})\n" }, { "alpha_fraction": 0.7630111575126648, "alphanum_fraction": 0.7657992839813232, "avg_line_length": 22.648351669311523, "blob_id": "a4d949b0c1c7ef414d3ccf89766427e2edf697cf", "content_id": "5849d6e31f196dafb63a57b3f1a3b469bb3966d2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2152, "license_type": "permissive", "max_line_length": 54, "num_lines": 91, "path": "/bids-validator/src/types/context.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { ValidatorOptions } from '../setup/options.ts'\n\nexport interface ContextDatasetSubjects {\n sub_dirs: string[]\n participant_id: string[]\n phenotype: string[]\n}\nexport interface ContextDataset {\n dataset_description: Record<string, unknown>\n files: any[]\n tree: object\n ignored: any[]\n modalities: any[]\n subjects: ContextDatasetSubjects[]\n options?: ValidatorOptions\n}\nexport interface ContextSubjectSessions {\n ses_dirs: string[]\n session_id: string[]\n phenotype: string[]\n}\nexport interface ContextSubject {\n sessions: ContextSubjectSessions\n}\nexport interface ContextAssociationsEvents {\n path?: string\n onset?: string[]\n}\nexport interface ContextAssociationsAslcontext {\n path: string\n n_rows: number\n volume_type: string[]\n}\nexport interface ContextAssociationsM0scan {\n path: string\n}\nexport interface ContextAssociationsMagnitude {\n path: string\n}\nexport interface ContextAssociationsMagnitude1 {\n path: string\n}\nexport interface ContextAssociationsBval {\n path: string\n n_cols: number\n}\nexport interface ContextAssociationsBvec {\n path: string\n n_cols: number\n}\nexport interface ContextAssociations {\n events?: ContextAssociationsEvents\n aslcontext?: ContextAssociationsAslcontext\n m0scan?: ContextAssociationsM0scan\n magnitude?: ContextAssociationsMagnitude\n magnitude1?: ContextAssociationsMagnitude1\n bval?: ContextAssociationsBval\n bvec?: ContextAssociationsBvec\n}\nexport interface ContextNiftiHeaderDimInfo {\n freq: number\n phase: number\n slice: number\n}\nexport interface ContextNiftiHeaderXyztUnits {\n xyz: 'unknown' | 'meter' | 'mm' | 'um'\n t: 'unknown' | 'sec' | 'msec' | 
'usec'\n}\nexport interface ContextNiftiHeader {\n dim_info: ContextNiftiHeaderDimInfo\n dim: number[]\n pixdim: number[]\n xyzt_units: ContextNiftiHeaderXyztUnits\n qform_code: number\n sform_code: number\n}\nexport interface Context {\n dataset: ContextDataset\n subject: ContextSubject\n path: string\n entities: object\n datatype: string\n suffix: string\n extension: string\n modality: string\n sidecar: object\n associations: ContextAssociations\n columns: object\n json: object\n nifti_header?: ContextNiftiHeader\n}\n" }, { "alpha_fraction": 0.6417170166969299, "alphanum_fraction": 0.6462467908859253, "avg_line_length": 26.43195343017578, "blob_id": "b31a048d16b40054329334d86895f1e740fd97eb", "content_id": "1135d152d53c8c7f7074804f8c602c60f5995750", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 4636, "license_type": "permissive", "max_line_length": 78, "num_lines": 169, "path": "/bids-validator/src/summary/summary.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { collectSubjectMetadata } from './collectSubjectMetadata.ts'\nimport { readAll, readerFromStreamReader } from '../deps/stream.ts'\nimport { SummaryOutput, SubjectMetadata } from '../types/validation-result.ts'\nimport { BIDSContext } from '../schema/context.ts'\n\nexport const modalityPrettyLookup: Record<string, string> = {\n mri: 'MRI',\n pet: 'PET',\n meg: 'MEG',\n eeg: 'EEG',\n ieeg: 'iEEG',\n micro: 'Microscopy',\n}\n\nconst secondaryLookup: Record<string, string> = {\n dwi: 'MRI_Diffusion',\n anat: 'MRI_Structural',\n func: 'MRI_Functional',\n perf: 'MRI_Perfusion',\n}\n\nexport function computeModalities(\n modalities: Record<string, number>,\n): string[] {\n // Order by matching file count\n const nonZero = Object.keys(modalities).filter((a) => modalities[a] !== 0)\n if (nonZero.length === 0) {\n return []\n }\n const sortedModalities = nonZero.sort((a, b) => {\n if (modalities[b] === modalities[a]) {\n // On a tie, hand it to the non-MRI modality\n if (b === 'mri') {\n return -1\n } else {\n return 0\n }\n }\n return modalities[b] - modalities[a]\n })\n return sortedModalities.map((mod) =>\n mod in modalityPrettyLookup ? 
modalityPrettyLookup[mod] : mod,\n )\n}\n\nexport function computeSecondaryModalities(\n secondary: Record<string, number>,\n): string[] {\n const nonZeroSecondary = Object.keys(secondary).filter(\n (a) => secondary[a] !== 0,\n )\n const sortedSecondary = nonZeroSecondary.sort(\n (a, b) => secondary[b] - secondary[a],\n )\n return sortedSecondary\n}\n\nexport class Summary {\n sessions: Set<string>\n subjects: Set<string>\n subjectMetadata: SubjectMetadata[]\n tasks: Set<string>\n totalFiles: number\n size: number\n dataProcessed: boolean\n pet: Record<string, any>\n modalitiesCount: Record<string, number>\n secondaryModalitiesCount: Record<string, number>\n datatypes: Set<string>\n schemaVersion: string\n constructor() {\n this.dataProcessed = false\n this.totalFiles = -1\n this.size = 0\n this.sessions = new Set()\n this.subjects = new Set()\n this.subjectMetadata = []\n this.tasks = new Set()\n this.pet = {}\n this.datatypes = new Set()\n this.modalitiesCount = {\n mri: 0,\n pet: 0,\n meg: 0,\n eeg: 0,\n ieeg: 0,\n microscopy: 0,\n }\n this.secondaryModalitiesCount = {\n MRI_Diffusion: 0,\n MRI_Structural: 0,\n MRI_Functional: 0,\n MRI_Perfusion: 0,\n PET_Static: 0,\n PET_Dynamic: 0,\n iEEG_ECoG: 0,\n iEEG_SEEG: 0,\n }\n this.schemaVersion = ''\n }\n get modalities() {\n return computeModalities(this.modalitiesCount)\n }\n get secondaryModalities() {\n return computeSecondaryModalities(this.secondaryModalitiesCount)\n }\n async update(context: BIDSContext): Promise<void> {\n if (context.file.path.startsWith('/derivatives') && !this.dataProcessed) {\n return\n }\n\n this.totalFiles++\n this.size += await context.file.size\n\n if ('sub' in context.entities) {\n this.subjects.add(context.entities.sub)\n }\n if ('ses' in context.entities) {\n this.sessions.add(context.entities.ses)\n }\n\n if (context.datatype.length) {\n this.datatypes.add(context.datatype)\n }\n\n if (context.extension === '.json') {\n const parsedJson = await context.json\n if ('TaskName' in parsedJson) {\n this.tasks.add(parsedJson.TaskName)\n }\n }\n if (context.modality) {\n this.modalitiesCount[context.modality]++\n }\n\n if (context.datatype in secondaryLookup) {\n const key = secondaryLookup[context.datatype]\n this.secondaryModalitiesCount[key]++\n } else if (context.datatype === 'pet' && 'rec' in context.entities) {\n if (['acstat', 'nacstat'].includes(context.entities.rec)) {\n this.secondaryModalitiesCount.PET_Static++\n } else if (['acdyn', 'nacdyn'].includes(context.entities.rec)) {\n this.secondaryModalitiesCount.PET_Dynamic++\n }\n }\n\n if (context.file.path.endsWith('participants.tsv')) {\n const tsvContents = await context.file.text()\n this.subjectMetadata = collectSubjectMetadata(tsvContents)\n }\n }\n\n formatOutput(): SummaryOutput {\n return {\n sessions: Array.from(this.sessions),\n subjects: Array.from(this.subjects),\n subjectMetadata: this.subjectMetadata,\n tasks: Array.from(this.tasks),\n modalities: this.modalities,\n secondaryModalities: this.secondaryModalities,\n totalFiles: this.totalFiles,\n size: this.size,\n dataProcessed: this.dataProcessed,\n pet: this.pet,\n datatypes: Array.from(this.datatypes),\n schemaVersion: this.schemaVersion,\n }\n }\n}\n" }, { "alpha_fraction": 0.6465256810188293, "alphanum_fraction": 0.6472809910774231, "avg_line_length": 30.5238094329834, "blob_id": "5dc9fe51f1c08ff6c0d913d94273f243f22ac227", "content_id": "947a3acc8391b2854d2e6098bf941370f5f9d763", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", 
"length_bytes": 1324, "license_type": "permissive", "max_line_length": 75, "num_lines": 42, "path": "/bids-validator/src/tests/local/common.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { readFileTree } from '../../files/deno.ts'\nimport { FileTree } from '../../types/filetree.ts'\nimport { validate } from '../../validators/bids.ts'\nimport { ValidationResult } from '../../types/validation-result.ts'\nimport { Issue } from '../../types/issues.ts'\nimport { DatasetIssues } from '../../issues/datasetIssues.ts'\nimport { Summary } from '../../summary/summary.ts'\nimport { parseOptions, ValidatorOptions } from '../../setup/options.ts'\n\nexport async function validatePath(\n t: Deno.TestContext,\n path: string,\n options: Partial<ValidatorOptions> = {},\n): Promise<{ tree: FileTree; result: ValidationResult }> {\n let tree: FileTree = new FileTree('', '')\n let summary = new Summary()\n let result: ValidationResult = {\n issues: new DatasetIssues(),\n summary: summary.formatOutput(),\n }\n\n await t.step('file tree is read', async () => {\n tree = await readFileTree(path)\n })\n\n await t.step('completes validation', async () => {\n result = await validate(tree, {\n ...(await parseOptions([path])),\n ...options,\n })\n })\n\n return { tree, result }\n}\n\nexport function formatAssertIssue(message: string, issue?: Issue) {\n if (issue) {\n return `${message}\\n${Deno.inspect(issue, { depth: 8, colors: true })}`\n } else {\n return `${message}\\nAsserted issue is undefined`\n }\n}\n" }, { "alpha_fraction": 0.6190476417541504, "alphanum_fraction": 0.6336996555328369, "avg_line_length": 25, "blob_id": "b6b5c59cf0dd6dfb4d803c4905fd8a14fe0abe1c", "content_id": "91736280a670882e39f12f95bc98b96985b4a38f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 546, "license_type": "permissive", "max_line_length": 98, "num_lines": 21, "path": "/bids-validator/src/utils/objectPathHandler.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// https://stackoverflow.com/questions/67849097/typescript-type-narrowing-not-working-when-looping\nexport const hasProp = <K extends PropertyKey, T>(\n obj: T,\n prop: K,\n): obj is T & Record<K, unknown> => {\n return Object.prototype.hasOwnProperty.call(obj, prop)\n}\n\nexport const objectPathHandler = {\n get(target: unknown, property: string) {\n let res = target\n for (const prop of property.split('.')) {\n if (hasProp(res, prop)) {\n res = res[prop]\n } else {\n return undefined\n }\n }\n return res\n },\n}\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6920821070671082, "avg_line_length": 36.88888931274414, "blob_id": "ed53352046bf8f6079a76ca858be665515c6de00", "content_id": "67c737a33325372417464f65de44769acc1da4a1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 682, "license_type": "permissive", "max_line_length": 89, "num_lines": 18, "path": "/bids-validator/tests/potentialLocations.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport potentialLocations from '../utils/files/potentialLocations'\n\ndescribe('potentialLocations', () => {\n it('should not return duplicate paths', () => {\n const path = 'data/BIDS-examples/ds001'\n const pLs = potentialLocations(path)\n assert.deepEqual(pLs.length, new Set(pLs).size)\n })\n it('.bold files should only return 
potential locations that include tasknames', () => {\n const path = 'dsTest/sub-01/func/sub-01_task-testing_run-01_bold.json'\n const pLs = potentialLocations(path)\n const anyNonTaskSpecific = pLs.some(\n (location) => location.indexOf('task') < 0,\n )\n assert.equal(anyNonTaskSpecific, false)\n })\n})\n" }, { "alpha_fraction": 0.6304079294204712, "alphanum_fraction": 0.6365883946418762, "avg_line_length": 28.962963104248047, "blob_id": "f99195965d5f5ba8cccc18a5c77b1e74313553de", "content_id": "a9bda5580b9c42ce5d38fa4d31d2c313d4410ce3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 809, "license_type": "permissive", "max_line_length": 74, "num_lines": 27, "path": "/bids-validator/validators/nifti/fieldmapWithoutMagnitude.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const Issue = require('../../utils').issues.Issue\n\nconst fieldmapWithoutMagnitude = (files) => {\n // Check for _fieldmap nifti exists without corresponding _magnitude\n const issues = []\n const niftiNames = files.map((nifti) => nifti.name)\n const fieldmaps = niftiNames.filter(\n (nifti) => nifti.indexOf('_fieldmap') > -1,\n )\n const magnitudes = niftiNames.filter(\n (nifti) => nifti.indexOf('_magnitude') > -1,\n )\n fieldmaps.map((nifti) => {\n const associatedMagnitudeFile = nifti.replace('fieldmap', 'magnitude')\n if (magnitudes.indexOf(associatedMagnitudeFile) === -1) {\n issues.push(\n new Issue({\n code: 91,\n file: files.find((niftiFile) => niftiFile.name == nifti),\n }),\n )\n }\n })\n return issues\n}\n\nexport default fieldmapWithoutMagnitude\n" }, { "alpha_fraction": 0.5461060404777527, "alphanum_fraction": 0.5494505763053894, "avg_line_length": 29.77941131591797, "blob_id": "ebf445eb78f6b5392b966add16730e4f92365c77", "content_id": "17a1bac404ec696772e9d53db856a427b8980342", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2093, "license_type": "permissive", "max_line_length": 88, "num_lines": 68, "path": "/bids-validator/utils/summary/collectSubjectMetadata.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "const PARTICIPANT_ID = 'participantId'\n/**\n * Go from tsv format string with participant_id as a required header to array of form\n * [\n * {\n * participantId: 'participant_id_1'\n * foo: 'x',\n * ...\n * },\n * {\n * participantId: 'participant_id_2'\n * foo: 'y',\n * ...\n * }\n * ...\n * ]\n *\n * returns null if participant_id is not a header or file contents do not exist\n * @param {string} participantsTsvContent\n */\nconst collectSubjectMetadata = (participantsTsvContent) => {\n if (participantsTsvContent) {\n const contentTable = participantsTsvContent\n .split(/\\r?\\n/)\n .filter((row) => row !== '')\n .map((row) => row.split('\\t'))\n const [snakeCaseHeaders, ...subjectData] = contentTable\n const headers = snakeCaseHeaders.map((header) =>\n header === 'participant_id' ? 
PARTICIPANT_ID : header,\n )\n const targetKeys = [PARTICIPANT_ID, 'age', 'sex', 'group']\n .map((key) => ({\n key,\n index: headers.findIndex((targetKey) => targetKey === key),\n }))\n .filter(({ index }) => index !== -1)\n const participantIdKey = targetKeys.find(\n ({ key }) => key === PARTICIPANT_ID,\n )\n const ageKey = targetKeys.find(({ key }) => key === 'age')\n if (participantIdKey === undefined) return null\n else\n return subjectData\n .map((data) => {\n // this first map is for transforming any data coming out of participants.tsv:\n // strip subject ids to match metadata.subjects: 'sub-01' -> '01'\n data[participantIdKey.index] = data[participantIdKey.index].replace(\n /^sub-/,\n '',\n )\n // make age an integer\n if (ageKey) data[ageKey.index] = parseInt(data[ageKey.index])\n return data\n })\n .map((data) =>\n //extract all target metadata for each subject\n targetKeys.reduce(\n (subject, { key, index }) => ({\n ...subject,\n [key]: data[index],\n }),\n {},\n ),\n )\n }\n}\n\nexport default collectSubjectMetadata\n" }, { "alpha_fraction": 0.6209715008735657, "alphanum_fraction": 0.635528564453125, "avg_line_length": 32.023136138916016, "blob_id": "8ef9b3f53d69651a86e956dac97734d896328d4d", "content_id": "bf7f54d53b8826b2551db71a94d67a60f5503790", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 25692, "license_type": "permissive", "max_line_length": 125, "num_lines": 778, "path": "/bids-validator/tests/json.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import assert from 'assert'\nimport validate from '../index'\n\ndescribe('JSON', function () {\n var file = {\n name: 'task-rest_bold.json',\n relativePath: '/task-rest_bold.json',\n }\n var jsonDict = {}\n\n it('sidecars should have key/value pair for \"RepetitionTime\" expressed in seconds', function () {\n var jsonObj = {\n RepetitionTime: 1.2,\n echo_time: 0.005,\n flip_angle: 90,\n TaskName: 'Rest',\n }\n jsonDict[file.relativePath] = jsonObj\n validate.JSON(file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n var jsonObjInval = {\n RepetitionTime: 1200,\n echo_time: 0.005,\n flip_angle: 90,\n TaskName: 'Rest',\n }\n jsonDict[file.relativePath] = jsonObjInval\n validate.JSON(file, jsonDict, function (issues) {\n assert(issues && issues.length === 1)\n })\n })\n\n it('should detect negative value for SliceTiming', function () {\n var jsonObj = {\n RepetitionTime: 1.2,\n SliceTiming: [-1.0, 0.0, 1.0],\n TaskName: 'Rest',\n }\n jsonDict[file.relativePath] = jsonObj\n validate.JSON(file, jsonDict, function (issues) {\n assert(issues.length === 1 && issues[0].code == 55)\n })\n })\n\n var meg_file = {\n name: 'sub-01_run-01_meg.json',\n relativePath: '/sub-01_run-01_meg.json',\n }\n\n it('*_meg.json sidecars should have required key/value pairs', function () {\n var jsonObj = {\n TaskName: 'Audiovis',\n SamplingFrequency: 1000,\n PowerLineFrequency: 50,\n DewarPosition: 'Upright',\n SoftwareFilters: 'n/a',\n DigitizedLandmarks: true,\n DigitizedHeadPoints: false,\n }\n jsonDict[meg_file.relativePath] = jsonObj\n validate.JSON(meg_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n\n var jsonObjInval = jsonObj\n jsonObjInval['SamplingFrequency'] = ''\n jsonDict[meg_file.relativePath] = jsonObjInval\n validate.JSON(meg_file, jsonDict, function (issues) {\n assert(issues && issues.length === 1)\n })\n })\n\n var eeg_file = {\n name: 'sub-01_run-01_eeg.json',\n relativePath: 
'/sub-01_run-01_eeg.json',\n }\n\n it('*.json sidecars with CogPOID or CogAtlasID fields should require a uri format', function () {\n var jsonObj = {\n TaskName: 'rest',\n SamplingFrequency: 1000,\n EEGReference: 'Cz',\n SoftwareFilters: 'n/a',\n PowerLineFrequency: 1000,\n CogAtlasID:\n 'we did a search on https://ww.idontexist.com for the word \"atlas\"',\n }\n jsonDict[eeg_file.relativePath] = jsonObj\n validate.JSON(eeg_file, jsonDict, function (issues) {\n assert(issues.length === 1)\n assert(issues[0].evidence == '.CogAtlasID should match format \"uri\"')\n })\n })\n\n it('*_eeg.json sidecars should have required key/value pairs', function () {\n var jsonObj = {\n TaskName: 'rest',\n SamplingFrequency: 1000,\n EEGReference: 'Cz',\n SoftwareFilters: {\n HighPass: { HalfAmplitudeCutOffHz: 1, RollOff: '6dB/Octave' },\n },\n PowerLineFrequency: 'n/a',\n CogPOID: 'https://www.idontexist.com',\n }\n jsonDict[eeg_file.relativePath] = jsonObj\n validate.JSON(eeg_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n\n var jsonObjInval = jsonObj\n jsonObjInval['SamplingFrequency'] = ''\n jsonDict[eeg_file.relativePath] = jsonObjInval\n validate.JSON(eeg_file, jsonDict, function (issues) {\n assert(issues && issues[0].code == 55)\n })\n })\n\n var ieeg_file = {\n name: 'sub-01_run-01_ieeg.json',\n relativePath: '/sub-01_run-01_ieeg.json',\n }\n\n it('*_ieeg.json sidecars should have required key/value pairs', function () {\n var jsonObj = {\n TaskName: 'Audiovis',\n SamplingFrequency: 10,\n PowerLineFrequency: 50,\n SoftwareFilters: {\n HighPass: { HalfAmplitudeCutOffHz: 1, RollOff: '6dB/Octave' },\n },\n iEEGReference: 'chan1',\n CogAtlasID: 'doi:thisisadoi',\n }\n jsonDict[ieeg_file.relativePath] = jsonObj\n validate.JSON(ieeg_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n var jsonObjInval = jsonObj\n jsonObjInval['Manufacturer'] = ''\n jsonDict[ieeg_file.relativePath] = jsonObjInval\n validate.JSON(ieeg_file, jsonDict, function (issues) {\n assert(issues && issues.length === 1)\n })\n })\n\n var meg_coordsystem_file = {\n name: 'sub-01/meg/sub-01_task-testing_coordsystem.json',\n relativePath: '/sub-01/meg/sub-01_task-testing_coordsystem.json',\n }\n\n it('MEG *_coordsystem.json files should have required key/value pairs', function () {\n var jsonObj = {\n FiducialsDescription: 'Fiducials were digitized using ... 
',\n MEGCoordinateSystem: 'CTF',\n MEGCoordinateUnits: 'mm',\n MEGCoordinateSystemDescription: 'this is the usual ...',\n EEGCoordinateSystem: 'CapTrak',\n EEGCoordinateSystemDescription: 'RAS orientation ...',\n HeadCoilCoordinateSystem: 'Other',\n HeadCoilCoordinates: {\n LPA: [-1, 0, 0],\n RPA: [1, 0, 0],\n NAS: [0, 1, 0],\n },\n AnatomicalLandmarkCoordinates: {\n LPA: [-1, 0, 0],\n RPA: [1, 0, 0],\n NAS: [0, 1, 0],\n },\n AnatomicalLandmarkCoordinateSystem: 'Other',\n AnatomicalLandmarkCoordinateUnits: 'mm',\n }\n jsonDict[meg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(meg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 4)\n assert(\n issues[0].evidence ==\n \" should have required property 'HeadCoilCoordinateSystemDescription'\",\n )\n assert(issues[1].evidence == ' should match \"then\" schema')\n assert(\n issues[2].evidence ==\n \" should have required property 'AnatomicalLandmarkCoordinateSystemDescription'\",\n )\n assert(issues[3].evidence == ' should match \"then\" schema')\n })\n })\n\n var eeg_coordsystem_file = {\n name: 'sub-01/eeg/sub-01_task-testing_coordsystem.json',\n relativePath: '/sub-01/eeg/sub-01_task-testing_coordsystem.json',\n }\n\n it('EEG *_coordsystem.json files should have required key/value pairs', function () {\n var jsonObj = {\n IntendedFor: 'sub-01_task-testing_electrodes.tsv',\n FiducialsDescription: 'Fiducials were digitized using ... ',\n EEGCoordinateSystem: 'CapTrak',\n EEGCoordinateUnits: 'mm',\n EEGCoordinateSystemDescription: 'RAS orientation ...',\n AnatomicalLandmarkCoordinates: {\n LPA: [-1, 0, 0],\n RPA: [1, 0, 0],\n NAS: [0, 1, 0],\n },\n AnatomicalLandmarkCoordinateSystem: 'Other',\n AnatomicalLandmarkCoordinateUnits: 'mm',\n AnatomicalLandmarkCoordinateSystemDescription: '...',\n }\n jsonDict[eeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(eeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('EEG *_coordsystem.json files should not contain unaccepted *CoordinateSystem keywords', function () {\n var jsonObj = {\n EEGCoordinateSystem: 'RAS',\n EEGCoordinateUnits: 'mm',\n EEGCoordinateSystemDescription: 'RAS orientation ...',\n }\n jsonDict[eeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(eeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 5)\n assert(\n issues[0].evidence ==\n '.EEGCoordinateSystem should be equal to one of the allowed values',\n )\n assert(\n issues[4].evidence ==\n '.EEGCoordinateSystem should match some schema in anyOf',\n )\n })\n })\n\n it('EEG *_coordsystem.json schema should require *Description if *Coordsystem is \"Other\"', function () {\n var jsonObj = {\n EEGCoordinateSystem: 'Other',\n EEGCoordinateUnits: 'mm',\n EEGCoordinateSystemDescription: 'we did ...',\n FiducialsCoordinateSystem: 'Other',\n AnatomicalLandmarkCoordinateSystem: 'Other',\n AnatomicalLandmarkCoordinateSystemDescription: 'we did ...',\n }\n jsonDict[eeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(eeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(\n issues[0].evidence ==\n \" should have required property 'FiducialsCoordinateSystemDescription'\",\n )\n assert(issues[1].evidence == ' should match \"then\" schema')\n })\n })\n\n it('EEG *_coordsystem.json schema general requirements should not be overridden by conditional requirements', function () {\n var jsonObj = {\n EEGCoordinateSystem: 'Other',\n EEGCoordinateSystemDescription: 'We used a ...',\n 
AnatomicalLandmarkCoordinateSystem: 'Other',\n }\n jsonDict[eeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(eeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 3)\n assert(\n issues[0].evidence ==\n \" should have required property 'EEGCoordinateUnits'\",\n )\n assert(\n issues[1].evidence ==\n \" should have required property 'AnatomicalLandmarkCoordinateSystemDescription'\",\n )\n assert(issues[2].evidence == ' should match \"then\" schema')\n })\n })\n\n var ieeg_coordsystem_file = {\n name: 'sub-01/ieeg/sub-01_task-testing_coordsystem.json',\n relativePath: '/sub-01/ieeg/sub-01_task-testing_coordsystem.json',\n }\n\n it('iEEG *_coordsystem.json files should have required key/value pairs', function () {\n var jsonObj = {\n iEEGCoordinateSystem: 'Pixels',\n iEEGCoordinateUnits: 'pixels',\n }\n jsonDict[ieeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(ieeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('If iEEG CoordinateSystem is \"Pixels\", then CoordinateUnits must be \"pixels\"', function () {\n var jsonObj = {\n iEEGCoordinateSystem: 'Pixels',\n iEEGCoordinateUnits: 'mm',\n }\n jsonDict[ieeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(ieeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(\n issues[0].evidence ==\n '.iEEGCoordinateUnits should be equal to one of the allowed values',\n )\n assert(issues[1].evidence == ' should match \"then\" schema')\n })\n })\n\n it('iEEG *_coordsystem.json schema should require *Description if *Coordsystem is \"Other\"', function () {\n var jsonObj = {\n iEEGCoordinateSystem: 'Other',\n iEEGCoordinateUnits: 'pixels',\n }\n jsonDict[ieeg_coordsystem_file.relativePath] = jsonObj\n validate.JSON(ieeg_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(\n issues[0].evidence ==\n \" should have required property 'iEEGCoordinateSystemDescription'\",\n )\n assert(issues[1].evidence == ' should match \"then\" schema')\n })\n })\n\n it('should use inherited sidecars to find missing fields', function () {\n const multiEntryJsonDict = {}\n\n // this json file is missing the SamplingFrequency field\n const partialJsonObj = {\n TaskName: 'Audiovis',\n PowerLineFrequency: 50,\n DewarPosition: 'Upright',\n SoftwareFilters: 'n/a',\n DigitizedLandmarks: true,\n DigitizedHeadPoints: false,\n }\n multiEntryJsonDict[meg_file.relativePath] = partialJsonObj\n\n // this json file (sitting at the root directory level)\n // provides the missing json field\n const inheritedMegFile = {\n name: 'meg.json',\n relativePath: '/meg.json',\n }\n\n const restOfJsonObj = {\n SamplingFrequency: 2000,\n }\n multiEntryJsonDict[inheritedMegFile.relativePath] = restOfJsonObj\n\n // json validation will pass because (when merged) there are no\n // missing data fields\n validate.JSON(meg_file, multiEntryJsonDict, function (issues) {\n assert(issues.length == 0)\n })\n })\n\n it('should favor the sidecar on the directory level closest to the file being validated', function () {\n const multiEntryJsonDict = {}\n const lowLevelFile = {\n name: 'run-01_meg.json',\n relativePath: '/sub-01/run-01_meg.json',\n }\n\n // this json file has a good SamplingFrequency field\n const partialJsonObj = {\n TaskName: 'Audiovis',\n SamplingFrequency: 1000,\n PowerLineFrequency: 50,\n DewarPosition: 'Upright',\n SoftwareFilters: 'n/a',\n DigitizedLandmarks: true,\n DigitizedHeadPoints: false,\n }\n 
multiEntryJsonDict[lowLevelFile.relativePath] = partialJsonObj\n\n // this json file (sitting at the root directory level)\n // also has a SamplingFrequency field, but it is wrong.\n const inheritedMegFile = {\n name: 'meg.json',\n relativePath: '/meg.json',\n }\n\n const restOfJsonObj = {\n SamplingFrequency: '',\n }\n multiEntryJsonDict[inheritedMegFile.relativePath] = restOfJsonObj\n\n // json validation will pass because merged dictionaries prefer\n // field values of the json sidecar furthest from the root.\n // /meg.json is closer to the root than /sub-01/run-01_meg.json\n // and so the values of the latter should be preferred.\n validate.JSON(lowLevelFile, multiEntryJsonDict, function (issues) {\n assert(issues.length == 0)\n })\n })\n\n it('*_bold.json sidecars should not have EffectiveEchoSpacing > TotalReadoutTime', () => {\n // this json dictionary generates a sidecar with EffectiveEchoSpacing > TotalReadoutTime,\n // which is nonsensical\n const fieldMapJsonDict = {\n EffectiveEchoSpacing: 3,\n TotalReadoutTime: 1,\n }\n jsonDict[file.relativePath] = fieldMapJsonDict\n\n // validation should return an error of code 93\n validate.JSON(file, jsonDict, (issues) => {\n assert(issues.length == 1 && issues[0].code == '93')\n })\n })\n\n it('*_bold.json sidecars should have EffectiveEchoSpacing < TotalReadoutTime', () => {\n // this json dictionary generates a sidecar with EffectiveEchoSpacing < TotalReadoutTime,\n // which is reasonable\n const fieldMapJsonDict = {\n EffectiveEchoSpacing: 3,\n TotalReadoutTime: 5,\n }\n jsonDict[file.relativePath] = fieldMapJsonDict\n\n // validation should pass with no errors.\n validate.JSON(file, jsonDict, (issues) => {\n assert.deepEqual(issues, [])\n })\n })\n\n var genetic_info_file = {\n name: 'genetic_info.json',\n relativePath: '/genetic_info.json',\n }\n\n it('sample genetic_info.json should parse', function () {\n var jsonObj = {\n GeneticLevel: ['Genetic'],\n AnalyticalApproach: ['SNP Genotypes'],\n SampleOrigin: 'brain',\n TissueOrigin: 'gray matter',\n CellType: 'neuron',\n BrainLocation: '[-30 -15 10]',\n }\n jsonDict[genetic_info_file.relativePath] = jsonObj\n validate.JSON(genetic_info_file, jsonDict, function (issues) {\n assert.deepEqual(issues, [])\n })\n })\n\n it('genetic_info.json should use limited vocabulary for sample origin', function () {\n var jsonObj = {\n GeneticLevel: ['Genetic'],\n AnalyticalApproach: ['SNP Genotypes'],\n SampleOrigin: 'not_from_around_here',\n TissueOrigin: 'gray matter',\n CellType: 'neuron',\n BrainLocation: '[-30 -15 10]',\n }\n jsonDict[genetic_info_file.relativePath] = jsonObj\n validate.JSON(genetic_info_file, jsonDict, function (issues) {\n assert(issues.length === 1 && issues[0].code == 55)\n })\n })\n\n var dataset_description_file = {\n name: 'dataset_description.json',\n relativePath: '/dataset_description.json',\n }\n\n it('dataset_description.json should validate DatasetLinks', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n DatasetLinks: {\n mylink: 'https://www.google.com',\n deriv1: 'derivatives/derivative1',\n phantoms: 'file:///data/phantoms',\n ds000001: 'doi:10.18112/openneuro.ds000001.v1.0.0',\n },\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('dataset_description.json should raise on bad keys in DatasetLinks', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n DatasetLinks: {\n 
mylink: 'https://www.google.com',\n '': 'https://www.yahoo.com',\n 'mylink!': ':/path',\n 'my link': ':/another/path',\n },\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 6)\n assert(\n issues[0].evidence ==\n '.DatasetLinks should NOT be shorter than 1 characters',\n )\n assert(issues[1].evidence == \".DatasetLinks property name '' is invalid\")\n assert(\n issues[2].evidence ==\n '.DatasetLinks should match pattern \"^[a-zA-Z0-9]*$\"',\n )\n assert(\n issues[3].evidence ==\n \".DatasetLinks property name 'mylink!' is invalid\",\n )\n assert(issues[4].evidence == issues[2].evidence)\n assert(\n issues[5].evidence ==\n \".DatasetLinks property name 'my link' is invalid\",\n )\n })\n })\n\n it('dataset_description.json should raise on non-object value in DatasetLinks', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n DatasetLinks: 'https://www.google.com',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 1)\n assert(issues[0].evidence == '.DatasetLinks should be object')\n })\n })\n\n it('dataset_description.json should raise on invalid values in DatasetLinks', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n DatasetLinks: {\n mylink1: 'https://www.google.com',\n mylink2: 1,\n '': 'https://www.yahoo.com',\n },\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 3)\n assert(\n issues[0].evidence ==\n '.DatasetLinks should NOT be shorter than 1 characters',\n )\n assert(issues[1].evidence == \".DatasetLinks property name '' is invalid\")\n assert(issues[2].evidence == \".DatasetLinks['mylink2'] should be string\")\n })\n })\n\n it('dataset_description.json should validate with enum of DatasetType', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author'],\n DatasetType: 'raw',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('dataset_description.json should NOT validate with wrong enum of DatasetType', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author'],\n DatasetType: 'badenum',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 1 && issues[0].code == 55)\n })\n })\n\n it('dataset_description.json should NOT validate with number in Authors', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author', 1],\n DatasetType: 'raw',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 1 && issues[0].code == 55)\n })\n })\n\n it('dataset_description.json should validate with only required fields, no recommended', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n 
it('dataset_description.json should validate with DatasetType \"derivative\" and GeneratedBy defined', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author'],\n DatasetType: 'derivative',\n GeneratedBy: [{ Name: 'Manual' }],\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('dataset_description.json should NOT validate with DatasetType \"derivative\" and GeneratedBy empty', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author'],\n DatasetType: 'derivative',\n GeneratedBy: [],\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 1)\n assert(\n issues[0].code == 55 &&\n issues[0].evidence ==\n '.GeneratedBy should NOT have fewer than 1 items',\n )\n })\n })\n\n it('dataset_description.json should NOT validate with DatasetType \"derivative\" and GeneratedBy missing', function () {\n var jsonObj = {\n Name: 'Example Name',\n BIDSVersion: '1.4.0',\n Authors: ['example author'],\n DatasetType: 'derivative',\n }\n jsonDict[dataset_description_file.relativePath] = jsonObj\n validate.JSON(dataset_description_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(\n issues[0].code == 55 &&\n issues[0].evidence == \" should have required property 'GeneratedBy'\",\n )\n })\n })\n\n var beh_file = {\n name: 'sub-01_run-01_beh.json',\n relativePath: '/sub-01_run-01_beh.json',\n }\n\n it('*beh.json sidecars with CogPOID or CogAtlasID fields should require a uri format', function () {\n var jsonObj = {\n TaskName: 'stroop',\n CogAtlasID:\n 'we did a search on https://ww.idontexist.com for the word \"atlas\"',\n CogPOID:\n 'we did a search on https://ww.idontexisteither.com for the word \"paradigm\"',\n }\n jsonDict[beh_file.relativePath] = jsonObj\n validate.JSON(beh_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(issues[0].evidence == '.CogAtlasID should match format \"uri\"')\n assert(issues[1].evidence == '.CogPOID should match format \"uri\"')\n })\n })\n\n it('*beh.json with extra content throws no error', function () {\n var jsonObj = {\n TaskName: 'stroop',\n trial: {\n LongName: 'Trial name',\n Description: 'Indicator of the type of trial',\n Levels: {\n congruent: 'Word and color font are congruent.',\n incongruent: 'Word and color font are not congruent.',\n },\n },\n }\n jsonDict[beh_file.relativePath] = jsonObj\n validate.JSON(beh_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n var nirs_file = {\n name: 'sub-01_run-01_nirs.json',\n relativePath: '/sub-01_run-01_nirs.json',\n }\n\n it('*_nirs.json sidecars should have required key/value pairs', function () {\n var jsonObj = {\n TaskName: 'Audiovis',\n SamplingFrequency: 7,\n NIRSChannelCount: 7,\n NIRSSourceOptodeCount: 7,\n NIRSDetectorOptodeCount: 7,\n CapManufacturer: 'EasyCap',\n CapManufacturersModelName: 'actiCAP 64 Ch Standard-2',\n }\n jsonDict[nirs_file.relativePath] = jsonObj\n validate.JSON(nirs_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n var jsonObjInval = jsonObj\n jsonObjInval['BadKey'] = ''\n jsonDict[nirs_file.relativePath] = jsonObjInval\n validate.JSON(nirs_file, jsonDict, function (issues) {\n assert(issues && issues.length === 1)\n })\n })\n var nirs_coordsystem_file = {\n 
name: 'sub-01/nirs/sub-01_task-testing_coordsystem.json',\n relativePath: '/sub-01/nirs/sub-01_task-testing_coordsystem.json',\n }\n\n it('NIRS *_coordsystem.json files should have required key/value pairs', function () {\n var jsonObj = {\n NIRSCoordinateSystem: 'fsaverage',\n NIRSCoordinateUnits: 'mm',\n }\n jsonDict[nirs_coordsystem_file.relativePath] = jsonObj\n validate.JSON(nirs_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n })\n\n it('NIRS *_coordsystem.json schema should require *Description if *Coordsystem is \"Other\"', function () {\n var jsonObj = {\n NIRSCoordinateSystem: 'Other',\n NIRSCoordinateUnits: 'mm',\n }\n jsonDict[nirs_coordsystem_file.relativePath] = jsonObj\n validate.JSON(nirs_coordsystem_file, jsonDict, function (issues) {\n assert(issues.length === 2)\n assert(\n issues[0].evidence ==\n \" should have required property 'NIRSCoordinateSystemDescription'\",\n )\n assert(issues[1].evidence == ' should match \"then\" schema')\n })\n })\n\n var motion_file = {\n name: 'sub-01_ses-VR_task-dance_tracksys-Unity_motion.json',\n relativePath: '/sub-01_ses-VR_task-dance_tracksys-Unity_motion.json',\n }\n\n it('*_motion.json sidecars should have required key/value pairs', function () {\n var jsonObj = {\n TaskName: 'Dance',\n SamplingFrequency: 90,\n MotionChannelCount: 7,\n POSChannelCount: 3,\n ORNTChannelCount: 4,\n }\n jsonDict[motion_file.relativePath] = jsonObj\n validate.JSON(motion_file, jsonDict, function (issues) {\n assert(issues.length === 0)\n })\n var jsonObjInval = jsonObj\n jsonObjInval['BadKey'] = ''\n jsonDict[motion_file.relativePath] = jsonObjInval\n validate.JSON(motion_file, jsonDict, function (issues) {\n assert(issues && issues.length === 1)\n })\n })\n})\n" }, { "alpha_fraction": 0.5260389447212219, "alphanum_fraction": 0.5339506268501282, "avg_line_length": 31.792587280273438, "blob_id": "02719358b808191196a7618d9ca083e245580a5c", "content_id": "3eecaa430b7a03f0f163f1c1e36d3532b2081825", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 46008, "license_type": "permissive", "max_line_length": 195, "num_lines": 1403, "path": "/bids-validator/validators/nifti/nii.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\n\n/**\n * NIFTI\n *\n * Takes a NifTi header, a file path and a callback\n * as arguments. 
And calls back with any issues\n * it finds while validating against the BIDS\n * specification.\n */\nexport default function NIFTI(\n header,\n file,\n jsonContentsDict,\n bContentsDict,\n fileList,\n events,\n callback,\n) {\n const path = file.relativePath\n const issues = []\n const potentialSidecars = utils.files.potentialLocations(\n path.replace('.gz', '').replace('.nii', '.json'),\n )\n const potentialEvents = utils.files.potentialLocations(\n path.replace('.gz', '').replace('bold.nii', 'events.tsv'),\n )\n const potentialM0Scans = path.replace('_asl.nii', '_m0scan.nii')\n\n const mergedDictionary = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContentsDict,\n )\n const sidecarMessage =\n 'It can be included one of the following locations: ' +\n potentialSidecars.join(', ')\n const eventsMessage =\n 'It can be included one of the following locations: ' +\n potentialEvents.join(', ')\n\n if (path.includes('_asl.nii')) {\n if (!mergedDictionary.hasOwnProperty('MagneticFieldStrength')) {\n issues.push(\n new Issue({\n file: file,\n code: 182,\n reason:\n \"You must define 'MagneticFieldStrength' for this file. It is required for perfusion quantification, to infer default relaxation values for blood/tissue.\" +\n sidecarMessage,\n }),\n )\n }\n\n if (!mergedDictionary.hasOwnProperty('Manufacturer')) {\n issues.push(\n new Issue({\n file: file,\n code: 164,\n reason:\n \"You should define 'Manufacturer' for this file. This may reflect site differences in multi-site study (especially readout differences, but perhaps also labeling differences). \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('ArterialSpinLabelingType')) {\n issues.push(\n new Issue({\n file: file,\n code: 133,\n reason:\n \"You should define 'ArterialSpinLabelingType' for this file. If you don't provide this information CBF quantification will not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('MRAcquisitionType')) {\n issues.push(\n new Issue({\n file: file,\n code: 155,\n reason:\n \"You should define 'MRAcquisitionType' for this file. If you don't provide this information CBF quantification will not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n if (\n mergedDictionary.hasOwnProperty('ArterialSpinLabelingType') &&\n mergedDictionary['ArterialSpinLabelingType'].constructor === String\n ) {\n const ArterialSpinLabelingTypeString =\n mergedDictionary['ArterialSpinLabelingType']\n\n if (ArterialSpinLabelingTypeString == 'PASL') {\n if (!mergedDictionary.hasOwnProperty('LabelingSlabThickness')) {\n issues.push(\n new Issue({\n file: file,\n code: 142,\n reason:\n \"You should define 'LabelingSlabThickness' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('BolusCutOffFlag')) {\n issues.push(\n new Issue({\n file: file,\n code: 147,\n reason:\n \"You should define 'BolusCutOffFlag' for this file. 
\" +\n sidecarMessage,\n }),\n )\n }\n if (\n mergedDictionary.hasOwnProperty('BolusCutOffFlag') &&\n mergedDictionary['BolusCutOffFlag'].constructor === Boolean\n ) {\n const BolusCutOffFlagBoolean = mergedDictionary['BolusCutOffFlag']\n\n if (\n BolusCutOffFlagBoolean === true &&\n !mergedDictionary.hasOwnProperty('BolusCutOffDelayTime')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 149,\n reason:\n \"You should define 'BolusCutOffDelayTime' for this file.\" +\n sidecarMessage,\n }),\n )\n } else if (\n BolusCutOffFlagBoolean === true &&\n mergedDictionary.hasOwnProperty('BolusCutOffDelayTime') &&\n mergedDictionary['BolusCutOffDelayTime'].constructor === Number &&\n mergedDictionary['BolusCutOffDelayTime'] > 10\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 186,\n reason:\n \"'BolusCutOffDelayTime' is greater than 10, are you sure it's expressed in seconds? \",\n }),\n )\n } else if (\n BolusCutOffFlagBoolean === true &&\n mergedDictionary.hasOwnProperty('BolusCutOffDelayTime') &&\n mergedDictionary['BolusCutOffDelayTime'].constructor === Array\n ) {\n let BolusCutOffDelayTime = mergedDictionary['BolusCutOffDelayTime']\n const BolusCutOffDelayTimeWarning = BolusCutOffDelayTime.filter(\n (x) => x > 10,\n )\n if (BolusCutOffDelayTimeWarning.length > 0) {\n issues.push(\n new Issue({\n file: file,\n code: 186,\n reason:\n \"Some values of the 'BolusCutOffDelayTime' array you defined are greater than 10, are you sure they are expressed in seconds? \",\n }),\n )\n }\n }\n\n if (\n mergedDictionary.hasOwnProperty('BolusCutOffDelayTime') &&\n mergedDictionary['BolusCutOffDelayTime'].constructor === Array\n ) {\n let BolusCutOffDelayTime = mergedDictionary['BolusCutOffDelayTime']\n const MonotonicallyIncreasingBolusCutOffDelayTime =\n isMonotonicIncreasingArray(BolusCutOffDelayTime)\n if (!MonotonicallyIncreasingBolusCutOffDelayTime) {\n issues.push(\n new Issue({\n file: file,\n code: 192,\n reason:\n \"'BolusCutOffDelayTime' should be monotonically increasing.\",\n }),\n )\n }\n }\n\n if (\n BolusCutOffFlagBoolean === true &&\n !mergedDictionary.hasOwnProperty('BolusCutOffTechnique')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 150,\n reason:\n \"You should define 'BolusCutOffTechnique' for this file.\" +\n sidecarMessage,\n }),\n )\n }\n }\n\n if (\n mergedDictionary.hasOwnProperty('CASLType') ||\n mergedDictionary.hasOwnProperty('PCASLType') ||\n mergedDictionary.hasOwnProperty('LabelingPulseAverageGradient') ||\n mergedDictionary.hasOwnProperty('LabelingPulseMaximumGradient') ||\n mergedDictionary.hasOwnProperty('LabelingPulseAverageB1') ||\n mergedDictionary.hasOwnProperty('LabelingPulseDuration') ||\n mergedDictionary.hasOwnProperty('LabelingPulseFlipAngle') ||\n mergedDictionary.hasOwnProperty('LabelingPulseInterval') ||\n mergedDictionary.hasOwnProperty('LabelingDuration')\n ) {\n var CASLTypeString = ''\n var PCASLTypeString = ''\n var LabelingPulseAverageGradientString = ''\n var LabelingPulseMaximumGradientString = ''\n var LabelingPulseAverageB1String = ''\n var LabelingPulseDurationString = ''\n var LabelingPulseFlipAngleString = ''\n var LabelingPulseIntervalString = ''\n var LabelingDurationString = ''\n\n if (mergedDictionary.hasOwnProperty('CASLType'))\n CASLTypeString = \"'CASLType', \"\n if (mergedDictionary.hasOwnProperty('PCASLType'))\n PCASLTypeString = \"'PCASLType', \"\n if (mergedDictionary.hasOwnProperty('LabelingPulseAverageGradient'))\n LabelingPulseAverageGradientString =\n \"'LabelingPulseAverageGradient', \"\n if 
(mergedDictionary.hasOwnProperty('LabelingPulseMaximumGradient'))\n LabelingPulseMaximumGradientString =\n \"'LabelingPulseMaximumGradient', \"\n if (mergedDictionary.hasOwnProperty('LabelingPulseAverageB1'))\n LabelingPulseAverageB1String = \"'LabelingPulseAverageB1', \"\n if (mergedDictionary.hasOwnProperty('LabelingPulseDuration'))\n LabelingPulseDurationString = \"'LabelingPulseDuration', \"\n if (mergedDictionary.hasOwnProperty('LabelingPulseFlipAngle'))\n LabelingPulseFlipAngleString = \"'LabelingPulseFlipAngle', \"\n if (mergedDictionary.hasOwnProperty('LabelingPulseInterval'))\n LabelingPulseIntervalString = \"'LabelingPulseInterval', \"\n if (mergedDictionary.hasOwnProperty('LabelingDuration'))\n LabelingDurationString = \"'LabelingDuration', \"\n\n issues.push(\n new Issue({\n file: file,\n code: 190,\n reason:\n \"You defined one of the not allowed fields in case PASL 'ArterialSpinLabelingType'. Please verify \" +\n CASLTypeString +\n PCASLTypeString +\n LabelingPulseAverageGradientString +\n LabelingPulseMaximumGradientString +\n LabelingPulseAverageB1String +\n LabelingPulseDurationString +\n LabelingPulseFlipAngleString +\n LabelingPulseIntervalString +\n LabelingDurationString +\n ' and change accordingly.',\n }),\n )\n }\n }\n\n if (\n ArterialSpinLabelingTypeString == 'CASL' ||\n ArterialSpinLabelingTypeString == 'PCASL'\n ) {\n if (\n ArterialSpinLabelingTypeString == 'CASL' &&\n mergedDictionary.hasOwnProperty('PCASLType')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 191,\n reason:\n \"You defined the 'PCASLType' with a CASL 'LabellingType'. This is not allowed.\",\n }),\n )\n }\n if (\n ArterialSpinLabelingTypeString == 'PCASL' &&\n mergedDictionary.hasOwnProperty('CASLType')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 191,\n reason:\n \"You defined the 'CASLType' with a PCASL 'LabellingType'. This is not allowed.\",\n }),\n )\n }\n if (\n mergedDictionary.hasOwnProperty('PASLType') ||\n mergedDictionary.hasOwnProperty('LabelingSlabThickness') ||\n mergedDictionary.hasOwnProperty('BolusCutOffFlag') ||\n mergedDictionary.hasOwnProperty('BolusCutOffTimingSequence') ||\n mergedDictionary.hasOwnProperty('BolusCutOffDelayTime') ||\n mergedDictionary.hasOwnProperty('BolusCutOffTechnique')\n ) {\n var PASLTypeString = ''\n var LabelingSlabThicknessString = ''\n var BolusCutOffFlagString = ''\n var BolusCutOffTimingSequenceString = ''\n var BolusCutOffDelayTimeString = ''\n var BolusCutOffTechniqueString = ''\n\n if (mergedDictionary.hasOwnProperty('PASLType'))\n PASLTypeString = \" 'PASLType', \"\n if (mergedDictionary.hasOwnProperty('LabelingSlabThickness'))\n LabelingSlabThicknessString = \" 'LabelingSlabThickness', \"\n if (mergedDictionary.hasOwnProperty('BolusCutOffFlag'))\n BolusCutOffFlagString = \" 'BolusCutOffFlag', \"\n if (mergedDictionary.hasOwnProperty('BolusCutOffTimingSequence'))\n BolusCutOffTimingSequenceString = \" 'BolusCutOffTimingSequence', \"\n if (mergedDictionary.hasOwnProperty('BolusCutOffDelayTime'))\n BolusCutOffDelayTimeString = \" 'BolusCutOffDelayTime', \"\n if (mergedDictionary.hasOwnProperty('BolusCutOffTechnique'))\n BolusCutOffTechniqueString = \" 'BolusCutOffTechnique', \"\n\n issues.push(\n new Issue({\n file: file,\n code: 189,\n reason:\n \"You defined one of the not allowed fields in case of CASL or PCASL 'ArterialSpinLabelingType'. 
Please verify \" +\n PASLTypeString +\n LabelingSlabThicknessString +\n BolusCutOffFlagString +\n BolusCutOffTimingSequenceString +\n BolusCutOffDelayTimeString +\n BolusCutOffTechniqueString +\n ' and change accordingly.',\n }),\n )\n }\n\n if (!mergedDictionary.hasOwnProperty('LabelingDuration')) {\n issues.push(\n new Issue({\n file: file,\n code: 134,\n reason:\n \"You should define 'LabelingDuration' for this file. If you don't provide this information CBF quantification will not be possible. \" +\n 'LabelingDuration is the total duration, in seconds, of the labeling pulse train. ' +\n sidecarMessage,\n }),\n )\n } else {\n if (\n header &&\n mergedDictionary['LabelingDuration'].constructor === Array\n ) {\n let LabelingDuration = mergedDictionary['LabelingDuration']\n const LabelingDurationLength = LabelingDuration.length\n const kDim = header.dim[4]\n if (LabelingDurationLength !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 157,\n reason:\n \"'LabelingDuration' for this file does not match the 4th dimension of the NIFTI header. \",\n }),\n )\n }\n const LabelingDurationWarning = LabelingDuration.filter(\n (x) => x > 10,\n )\n if (LabelingDurationWarning.length > 0) {\n issues.push(\n new Issue({\n file: file,\n code: 187,\n reason:\n \"In the 'LabelingDuration' array some values are greater than 10, are you sure they are expressed in seconds? \",\n }),\n )\n }\n }\n if (\n mergedDictionary['LabelingDuration'].constructor === Number &&\n mergedDictionary['LabelingDuration'] > 10\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 187,\n reason:\n \"'LabelingDuration' is greater than 10, are you sure it's expressed in seconds?\",\n }),\n )\n }\n }\n }\n }\n\n if (!mergedDictionary.hasOwnProperty('PostLabelingDelay')) {\n issues.push(\n new Issue({\n file: file,\n code: 135,\n reason:\n \"You should define 'PostLabelingDelay' for this file. If you don't provide this information CBF quantification will not be possible. \" +\n sidecarMessage,\n }),\n )\n } else {\n if (\n mergedDictionary['PostLabelingDelay'].constructor === Number &&\n mergedDictionary['PostLabelingDelay'] > 10\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 184,\n reason:\n \"'PostLabelingDelay' is greater than 10, are you sure it's expressed in seconds?\",\n }),\n )\n }\n\n if (\n header &&\n mergedDictionary['PostLabelingDelay'].constructor === Array\n ) {\n let PostLabelingDelay = mergedDictionary['PostLabelingDelay']\n const PostLabelingDelayLength = PostLabelingDelay.length\n const kDim = header.dim[4]\n if (PostLabelingDelayLength !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 173,\n reason:\n \"'PostLabelingDelay' for this file does not match the 4th dimension of the NIFTI header. \",\n }),\n )\n }\n const PostLabelingDelayWarning = PostLabelingDelay.filter((x) => x > 10)\n if (PostLabelingDelayWarning.length > 0) {\n issues.push(\n new Issue({\n file: file,\n code: 184,\n reason:\n \"In the 'PostLabelingDelay' array some values are greater than 10, are you sure they are expressed in seconds? \",\n }),\n )\n }\n }\n }\n\n if (!mergedDictionary.hasOwnProperty('BackgroundSuppression')) {\n issues.push(\n new Issue({\n file: file,\n code: 136,\n reason:\n \"You should define 'BackgroundSuppression' for this file. If you don't provide this information CBF quantification will be biased. 
\" +\n sidecarMessage,\n }),\n )\n }\n if (mergedDictionary.hasOwnProperty('BackgroundSuppression')) {\n if (mergedDictionary['BackgroundSuppression'] == true) {\n if (\n !mergedDictionary.hasOwnProperty('BackgroundSuppressionPulseTime')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 144,\n reason:\n \"You should define 'BackgroundSuppressionPulseTime' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n if (\n !mergedDictionary.hasOwnProperty('BackgroundSuppressionNumberPulses')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 179,\n reason:\n \"You should define 'BackgroundSuppressionNumberPulses' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n }\n if (\n mergedDictionary.hasOwnProperty('BackgroundSuppressionNumberPulses') &&\n mergedDictionary.hasOwnProperty('BackgroundSuppressionPulseTime')\n ) {\n var BackgroundSuppressionNumberPulses =\n mergedDictionary['BackgroundSuppressionNumberPulses']\n var BackgroundSuppressionPulseTime =\n mergedDictionary['BackgroundSuppressionPulseTime']\n const kDim = BackgroundSuppressionPulseTime.length\n if (BackgroundSuppressionNumberPulses !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 180,\n reason:\n 'The BackgroundSuppressionNumberPulses is ' +\n BackgroundSuppressionNumberPulses +\n ' however the array BackgroundSuppressionPulseTime array has ' +\n kDim +\n ' values. Please check the discrepancy between this two values that must coincides.' +\n sidecarMessage,\n }),\n )\n }\n }\n }\n if (!mergedDictionary.hasOwnProperty('VascularCrushing')) {\n issues.push(\n new Issue({\n file: file,\n code: 137,\n reason:\n \"You should define 'VascularCrushing' for this file. It indicates if an ASL crusher method has been used. If you don't provide this information CBF quantification could be biased. \" +\n sidecarMessage,\n }),\n )\n }\n if (\n mergedDictionary.hasOwnProperty('VascularCrushing') &&\n mergedDictionary['VascularCrushing'].constructor === Boolean &&\n mergedDictionary['VascularCrushing'] &&\n !mergedDictionary.hasOwnProperty['VascularCrushingVENC']\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 145,\n reason:\n \"You should define 'VascularCrushingVENC' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('PulseSequenceDetails')) {\n issues.push(\n new Issue({\n file: file,\n code: 138,\n reason:\n \"You should define 'PulseSequenceDetails' for this file including information beyond pulse sequence type that identifies the specific pulse sequence used. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('M0Type')) {\n issues.push(\n new Issue({\n file: file,\n code: 153,\n reason:\n \"You should define 'M0Type' for this file. \" + sidecarMessage,\n }),\n )\n } else if (\n mergedDictionary.hasOwnProperty('M0Type') &&\n mergedDictionary['M0Type'].constructor === String\n ) {\n const M0String = mergedDictionary['M0Type']\n switch (M0String) {\n case 'Separate':\n // check if an m0 scan file is available and if it is valid\n\n if (\n !checkIfSeparateM0scanExists(\n potentialM0Scans,\n fileList,\n issues,\n file,\n )\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 202,\n reason:\n \"'M0Type' property of this asl ('\" +\n file.relativePath +\n \"') does not point to an existing file('\" +\n potentialM0Scans +\n \"'). 
Please mind that this value should not include subject level directory \" +\n \"('/\" +\n file.relativePath.split('/')[1] +\n \"/').\",\n evidence: potentialM0Scans,\n }),\n )\n }\n\n checkIfValidFiletype(potentialM0Scans, issues, file)\n break\n case 'Included':\n // Here we need to check if the tsv includes m0scan -> move this to validateTsvColumns\n break\n case 'Estimate':\n // Check if there is an estimated value in the json file\n if (!mergedDictionary.hasOwnProperty('M0Estimate')) {\n issues.push(\n new Issue({\n file: file,\n code: 195,\n reason:\n \"You set the 'M0Type' to 'Estimate', therefore you should also define 'M0Estimate' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n break\n case 'Absent':\n if (\n checkIfSeparateM0scanExists(\n potentialM0Scans,\n fileList,\n issues,\n file,\n ) ||\n mergedDictionary.hasOwnProperty('M0Estimate')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 198,\n reason:\n \"You set the 'M0Type' to 'Absent', you should avoid to define 'M0Estimate' or to include an [_m0scan.nii.gz] for this file. \" +\n sidecarMessage,\n }),\n )\n }\n break\n }\n }\n\n if (!mergedDictionary.hasOwnProperty('FlipAngle')) {\n if (\n mergedDictionary.hasOwnProperty('LookLocker') &&\n mergedDictionary['LookLocker']\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 166,\n reason:\n \"In case of a LookLocker acquisition you must define 'FlipAngle' for this file. \" +\n sidecarMessage,\n }),\n )\n } else {\n issues.push(\n new Issue({\n file: file,\n code: 167,\n reason:\n \"You should define 'FlipAngle' for this file. \" + sidecarMessage,\n }),\n )\n }\n } else {\n if (header && mergedDictionary['FlipAngle'].constructor === Array) {\n let FlipAngle = mergedDictionary['FlipAngle']\n const FlipAngleLength = FlipAngle.length\n const kDim = header.dim[4]\n if (FlipAngleLength !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 168,\n reason:\n \"'FlipAngle' for this file do not match the 4th dimension of the NIFTI header. \" +\n sidecarMessage,\n }),\n )\n }\n }\n }\n }\n if (path.includes('_asl.nii') || path.includes('_m0scan.nii')) {\n if (!mergedDictionary.hasOwnProperty('AcquisitionVoxelSize')) {\n issues.push(\n new Issue({\n file: file,\n code: 143,\n reason:\n \"You should define 'AcquisitionVoxelSize' for this file. \" +\n sidecarMessage,\n }),\n )\n } else {\n var AcquisitionVoxelSize = mergedDictionary['AcquisitionVoxelSize']\n if (AcquisitionVoxelSize.length != 3) {\n issues.push(\n new Issue({\n file: file,\n code: 156,\n reason:\n \"The 'AcquisitionVoxelSize' field length is not 3. AcquisitionVoxelSize must be defined as a vector of length 3. \" +\n sidecarMessage,\n }),\n )\n }\n }\n if (!mergedDictionary.hasOwnProperty('RepetitionTimePreparation')) {\n issues.push(\n new Issue({\n file: file,\n code: 200,\n reason:\n \"'RepetitionTimePreparation' must be defined for this file. \" +\n sidecarMessage,\n }),\n )\n } else if (\n header &&\n mergedDictionary.hasOwnProperty('RepetitionTimePreparation') &&\n mergedDictionary['RepetitionTimePreparation'].constructor === Array\n ) {\n const RepetitionTimePreparationArray =\n mergedDictionary['RepetitionTimePreparation']\n const kDim = header.dim[4]\n if (RepetitionTimePreparationArray.length !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 201,\n evidence:\n 'RepetitionTimePreparation array is of length ' +\n RepetitionTimePreparationArray.length +\n ' for this file and does not match the 4th dimension of the NIFTI header.' 
+\n sidecarMessage,\n }),\n )\n }\n }\n }\n\n if (path.includes('_dwi.nii')) {\n const potentialBvecs = utils.files.potentialLocations(\n path.replace('.gz', '').replace('.nii', '.bvec'),\n )\n const potentialBvals = utils.files.potentialLocations(\n path.replace('.gz', '').replace('.nii', '.bval'),\n )\n const bvec = utils.files.getBFileContent(potentialBvecs, bContentsDict)\n const bval = utils.files.getBFileContent(potentialBvals, bContentsDict)\n const bvecMessage =\n 'It can be included in one of the following locations: ' +\n potentialBvecs.join(', ')\n const bvalMessage =\n 'It can be included in one of the following locations: ' +\n potentialBvals.join(', ')\n\n if (!bvec) {\n issues.push(\n new Issue({\n code: 32,\n file: file,\n reason:\n '_dwi scans should have a corresponding .bvec file. ' + bvecMessage,\n }),\n )\n }\n if (!bval) {\n issues.push(\n new Issue({\n code: 33,\n file: file,\n reason:\n '_dwi scans should have a corresponding .bval file. ' + bvalMessage,\n }),\n )\n }\n\n if (bval && bvec && header) {\n /*\n bvec length ==3 is checked at bvec.spec.js hence following if loop does not have else block\n */\n if (bvec.replace(/^\\s+|\\s+$/g, '').split('\\n').length === 3) {\n const volumes = [\n bvec\n .split('\\n')[0]\n .replace(/^\\s+|\\s+$/g, '')\n .split(' ').length, // bvec row 1 length\n bvec\n .split('\\n')[1]\n .replace(/^\\s+|\\s+$/g, '')\n .split(' ').length, // bvec row 2 length\n bvec\n .split('\\n')[2]\n .replace(/^\\s+|\\s+$/g, '')\n .split(' ').length, // bvec row 3 length\n bval.replace(/^\\s+|\\s+$/g, '').split(' ').length, // bval row length\n header.dim[4], // header 4th dimension\n ]\n\n if (\n !volumes.every(function (v) {\n return v === volumes[0]\n })\n ) {\n issues.push(\n new Issue({\n code: 29,\n file: file,\n }),\n )\n }\n }\n }\n }\n\n if (missingEvents(path, potentialEvents, events)) {\n issues.push(\n new Issue({\n code: 25,\n file: file,\n reason:\n 'Task scans should have a corresponding events.tsv file. ' +\n eventsMessage,\n }),\n )\n }\n\n let repetitionTime, repetitionUnit\n if (header) {\n // Define repetition time from header and coerce to seconds.\n repetitionTime = header.pixdim[4]\n repetitionUnit =\n header.xyzt_units && header.xyzt_units[3] ? header.xyzt_units[3] : null\n if (repetitionUnit === 'ms') {\n repetitionTime = repetitionTime / 1000\n repetitionUnit = 's'\n }\n if (repetitionUnit === 'us') {\n repetitionTime = repetitionTime / 1000000\n repetitionUnit = 's'\n }\n }\n\n if (!mergedDictionary.invalid) {\n // task scan checks\n if (\n path.includes('_task-') &&\n !path.includes('_defacemask.nii') &&\n !path.includes('_sbref.nii')\n ) {\n if (!mergedDictionary.hasOwnProperty('TaskName')) {\n issues.push(\n new Issue({\n file: file,\n code: 50,\n reason:\n \"You have to define 'TaskName' for this file. \" + sidecarMessage,\n }),\n )\n }\n }\n\n // field map checks\n if (\n path.includes('_bold.nii') ||\n path.includes('_sbref.nii') ||\n path.includes('_dwi.nii') ||\n path.includes('_asl.nii') ||\n path.includes('_m0scan.nii')\n ) {\n if (!mergedDictionary.hasOwnProperty('EchoTime')) {\n if (path.includes('_asl.nii') || path.includes('_m0scan.nii')) {\n issues.push(\n new Issue({\n file: file,\n code: 193,\n reason:\n \"You must define 'EchoTime' for this file. If you don't provide this information a correct CBF quantification will not be possible.\" +\n sidecarMessage,\n }),\n )\n } else {\n issues.push(\n new Issue({\n file: file,\n code: 6,\n reason:\n \"You should define 'EchoTime' for this file. 
If you don't provide this information field map correction will not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n } else {\n if (\n header &&\n mergedDictionary.hasOwnProperty('EchoTime') &&\n mergedDictionary['EchoTime'].constructor === Array\n ) {\n const EchoTimeArray = mergedDictionary['EchoTime']\n const kDim = header.dim[3]\n if (EchoTimeArray.length !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 197,\n evidence:\n 'EchoTime array is of length ' +\n EchoTimeArray.length +\n \" and the value of the 'k' dimension is \" +\n kDim +\n ' for the corresponding nifti header.',\n }),\n )\n }\n }\n }\n if (!mergedDictionary.hasOwnProperty('PhaseEncodingDirection')) {\n issues.push(\n new Issue({\n file: file,\n code: 7,\n reason:\n \"You should define 'PhaseEncodingDirection' for this file. If you don't provide this information field map correction will not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('EffectiveEchoSpacing')) {\n issues.push(\n new Issue({\n file: file,\n code: 8,\n reason:\n \"You should define 'EffectiveEchoSpacing' for this file. If you don't provide this information field map correction will not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n }\n if (path.includes('_dwi.nii')) {\n if (!mergedDictionary.hasOwnProperty('TotalReadoutTime')) {\n issues.push(\n new Issue({\n file: file,\n code: 9,\n reason:\n \"You should define 'TotalReadoutTime' for this file. If you don't provide this information field map correction using TOPUP might not be possible. \" +\n sidecarMessage,\n }),\n )\n }\n }\n if (path.includes('_bold.nii') || path.includes('_asl.nii')) {\n // check that slice timing is defined\n if (!mergedDictionary.hasOwnProperty('SliceTiming')) {\n // case of ASL with 3D sequence - slice timing is not necessary\n if (\n !(\n mergedDictionary.hasOwnProperty('MRAcquisitionType') &&\n mergedDictionary.MRAcquisitionType === '3D' &&\n path.includes('_asl.nii')\n )\n ) {\n if (\n mergedDictionary.hasOwnProperty('MRAcquisitionType') &&\n mergedDictionary.MRAcquisitionType === '2D' &&\n path.includes('_asl.nii')\n ) {\n // case of ASL with 2D sequence - slice timing is required\n issues.push(\n new Issue({\n file: file,\n code: 183,\n reason:\n \"You should define 'SliceTiming' for this file. \" +\n \"If you don't provide this information slice time correction will not be possible. \" +\n sidecarMessage,\n }),\n )\n } else {\n issues.push(\n new Issue({\n file: file,\n code: 13,\n reason:\n \"You should define 'SliceTiming' for this file. \" +\n \"If you don't provide this information slice time correction will not be possible. 
\" +\n sidecarMessage,\n }),\n )\n }\n }\n }\n // check that slice timing has the proper length\n if (\n header &&\n mergedDictionary.hasOwnProperty('SliceTiming') &&\n mergedDictionary['SliceTiming'].constructor === Array\n ) {\n const sliceTimingArray = mergedDictionary['SliceTiming']\n const kDim = header.dim[3]\n if (sliceTimingArray.length !== kDim) {\n issues.push(\n new Issue({\n file: file,\n code: 87,\n evidence:\n 'SliceTiming array is of length ' +\n sliceTimingArray.length +\n \" and the value of the 'k' dimension is \" +\n kDim +\n ' for the corresponding nifti header.',\n }),\n )\n }\n }\n }\n // we don't need slice timing or repetition time for SBref\n if (path.includes('_bold.nii')) {\n if (\n !mergedDictionary.hasOwnProperty('RepetitionTime') &&\n !mergedDictionary.hasOwnProperty('VolumeTiming')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 10,\n reason:\n \"You have to define 'RepetitionTime' or 'VolumeTiming' for this file. \" +\n sidecarMessage,\n }),\n )\n } else if (\n header &&\n mergedDictionary.RepetitionTime &&\n mergedDictionary.EffectiveEchoSpacing &&\n mergedDictionary.PhaseEncodingDirection &&\n !mergedDictionary.hasOwnProperty('VolumeTiming')\n ) {\n var axes = { i: 1, j: 2, k: 3 }\n if (\n mergedDictionary.EffectiveEchoSpacing *\n header.dim[axes[mergedDictionary.PhaseEncodingDirection[0]]] >\n mergedDictionary.RepetitionTime\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 76,\n reason:\n \"Abnormally high value of 'EffectiveEchoSpacing' (\" +\n mergedDictionary.EffectiveEchoSpacing +\n ' seconds).',\n }),\n )\n }\n } else if (\n mergedDictionary.hasOwnProperty('VolumeTiming') &&\n mergedDictionary.hasOwnProperty('RepetitionTime')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 178,\n reason:\n \"'VolumeTiming' and 'RepetitionTime' for this file are mutually exclusive.\" +\n sidecarMessage,\n }),\n )\n } else if (\n mergedDictionary.hasOwnProperty('VolumeTiming') &&\n !mergedDictionary.hasOwnProperty('RepetitionTime')\n ) {\n if (\n mergedDictionary.hasOwnProperty('VolumeTiming') &&\n !mergedDictionary.hasOwnProperty('SliceTiming') &&\n !mergedDictionary.hasOwnProperty('AcquisitionDuration')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 171,\n }),\n )\n }\n let VolumeTiming = mergedDictionary['VolumeTiming']\n const MonotonicallyIncreasingVolumeTiming =\n isMonotonicIncreasingArray(VolumeTiming)\n if (!MonotonicallyIncreasingVolumeTiming) {\n issues.push(\n new Issue({\n file: file,\n code: 188,\n reason: \"'VolumeTiming' should be monotonically increasing.\",\n }),\n )\n }\n }\n if (typeof repetitionTime === 'undefined' && header) {\n issues.push(\n new Issue({\n file: file,\n code: 75,\n }),\n )\n } else if (mergedDictionary.RepetitionTime && header) {\n if (repetitionUnit !== 's') {\n issues.push(\n new Issue({\n file: file,\n code: 11,\n }),\n )\n }\n\n const niftiTR = Number(repetitionTime).toFixed(3)\n const jsonTR = Number(mergedDictionary.RepetitionTime).toFixed(3)\n if (niftiTR !== jsonTR) {\n issues.push(\n new Issue({\n file: file,\n code: 12,\n reason:\n 'Repetition time defined in the JSON (' +\n jsonTR +\n ' sec.) 
did not match the one defined in the NIFTI header (' +\n niftiTR +\n ' sec.)',\n }),\n )\n }\n }\n\n // check that slice timing values are greater than repetition time\n if (\n mergedDictionary.hasOwnProperty('SliceTiming') &&\n mergedDictionary['SliceTiming'].constructor === Array\n ) {\n const SliceTimingArray = mergedDictionary['SliceTiming']\n const valuesGreaterThanRepetitionTime =\n sliceTimingGreaterThanRepetitionTime(\n SliceTimingArray,\n mergedDictionary['RepetitionTime'],\n )\n if (valuesGreaterThanRepetitionTime.length > 0) {\n issues.push(\n new Issue({\n file: file,\n code: 66,\n evidence: valuesGreaterThanRepetitionTime.join(', '),\n }),\n )\n }\n }\n } else if (path.includes('_phasediff.nii')) {\n if (\n !mergedDictionary.hasOwnProperty('EchoTime1') ||\n !mergedDictionary.hasOwnProperty('EchoTime2')\n ) {\n issues.push(\n new Issue({\n file: file,\n code: 15,\n reason:\n \"You have to define 'EchoTime1' and 'EchoTime2' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n if (\n mergedDictionary.hasOwnProperty('EchoTime1') &&\n mergedDictionary.hasOwnProperty('EchoTime2')\n ) {\n const echoTimeDifference =\n mergedDictionary['EchoTime2'] - mergedDictionary['EchoTime1']\n if (echoTimeDifference < 0.0001 || echoTimeDifference > 0.01) {\n issues.push(\n new Issue({\n file: file,\n code: 83,\n reason:\n 'The value of (EchoTime2 - EchoTime1) should be within the range of 0.0001 - 0.01. ' +\n sidecarMessage,\n }),\n )\n }\n }\n } else if (path.includes('_phase1.nii') || path.includes('_phase2.nii')) {\n if (!mergedDictionary.hasOwnProperty('EchoTime')) {\n issues.push(\n new Issue({\n file: file,\n code: 16,\n reason:\n \"You have to define 'EchoTime' for this file. \" + sidecarMessage,\n }),\n )\n }\n } else if (path.includes('_fieldmap.nii')) {\n if (!mergedDictionary.hasOwnProperty('Units')) {\n issues.push(\n new Issue({\n file: file,\n code: 17,\n reason:\n \"You have to define 'Units' for this file. \" + sidecarMessage,\n }),\n )\n }\n } else if (path.includes('_epi.nii')) {\n if (!mergedDictionary.hasOwnProperty('PhaseEncodingDirection')) {\n issues.push(\n new Issue({\n file: file,\n code: 18,\n reason:\n \"You have to define 'PhaseEncodingDirection' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n if (!mergedDictionary.hasOwnProperty('TotalReadoutTime')) {\n issues.push(\n new Issue({\n file: file,\n code: 19,\n reason:\n \"You have to define 'TotalReadoutTime' for this file. \" +\n sidecarMessage,\n }),\n )\n }\n }\n\n if (\n (path.includes('_m0scan.nii') ||\n utils.type.file.isFieldMapMainNii(path)) &&\n mergedDictionary.hasOwnProperty('IntendedFor')\n ) {\n const intendedFor =\n typeof mergedDictionary['IntendedFor'] == 'string'\n ? 
[mergedDictionary['IntendedFor']]\n : mergedDictionary['IntendedFor']\n\n for (let key = 0; key < intendedFor.length; key++) {\n const intendedForFile = intendedFor[key]\n // Only check for presence of IntendedFor files if not a BIDS-URI\n // https://github.com/bids-standard/bids-validator/issues/1393\n if (!intendedForFile.startsWith('bids:')) {\n checkIfIntendedExists(intendedForFile, fileList, issues, file)\n checkIfValidFiletype(intendedForFile, issues, file)\n }\n }\n }\n }\n callback(issues)\n}\n\nfunction missingEvents(path, potentialEvents, events) {\n let hasEvent = false,\n isRest = false\n\n // check if is a rest file\n const pathParts = path.split('/')\n const filenameParts = pathParts[pathParts.length - 1].split('_')\n for (let i = 0; i < filenameParts.length; i++) {\n const part = filenameParts[i]\n if (part.toLowerCase().indexOf('task') === 0 && part.indexOf('rest') > -1) {\n isRest = true\n }\n }\n\n // check for event file\n for (let j = 0; j < potentialEvents.length; j++) {\n const event = potentialEvents[j]\n if (events.find((e) => e.path == event)) {\n hasEvent = true\n }\n }\n\n return !isRest && path.includes('_bold.nii') && !hasEvent\n}\n\n/**\n * Function to check each SliceTime from SliceTiming Array\n *\n */\n\nfunction sliceTimingGreaterThanRepetitionTime(array, repetitionTime) {\n const invalid_timesArray = []\n for (let t = 0; t < array.length; t++) {\n if (array[t] > repetitionTime) {\n invalid_timesArray.push(array[t])\n }\n }\n return invalid_timesArray\n}\n\nfunction checkIfIntendedExists(intendedForFile, fileList, issues, file) {\n const intendedForFileFull =\n '/' + file.relativePath.split('/')[1] + '/' + intendedForFile\n let onTheList = false\n\n for (let key2 in fileList) {\n if (key2) {\n const filePath = fileList[key2].relativePath\n if (filePath === intendedForFileFull) {\n onTheList = true\n }\n }\n }\n if (!onTheList) {\n issues.push(\n new Issue({\n file: file,\n code: 37,\n reason:\n \"'IntendedFor' property of this fieldmap ('\" +\n file.relativePath +\n \"') does not point to an existing file('\" +\n intendedForFile +\n \"'). 
Please mind that this value should not include subject level directory \" +\n \"('/\" +\n file.relativePath.split('/')[1] +\n \"/').\",\n evidence: intendedForFile,\n }),\n )\n }\n}\n\n/**\n * Functions to check if m0scan is present in various sub-types, be aware of the '-dir' pattern that could be subject to changes in future versions\n *\n */\n\nfunction checkIfSeparateM0scanExists(m0scanFile, fileList) {\n let rule = m0scanFile.replace('_m0scan.nii', '').replace('.gz', '')\n let m0scanFile_nii = m0scanFile.replace('.nii.gz', '.nii')\n let m0scanFile_niigz = m0scanFile\n\n let onTheList = false\n for (let key2 in fileList) {\n if (key2) {\n const filePath = fileList[key2].relativePath\n if (\n matchRule_m0scan(filePath, rule + '_dir-*') ||\n filePath === m0scanFile_nii ||\n filePath === m0scanFile_niigz\n ) {\n onTheList = true\n }\n }\n }\n return onTheList\n}\n\nfunction matchRule_m0scan(str, rule) {\n var escapeRegex = (str) => str.replace(/([.*+?^=!:${}()|[]\\/\\\\])/g, '\\\\$1')\n return new RegExp(\n rule.split('*').map(escapeRegex).join('.*') + '_m0scan.nii',\n ).test(str)\n}\n\nfunction checkIfValidFiletype(intendedForFile, issues, file) {\n const validFiletype = new RegExp('.nii(.gz)?$')\n const isValidFiletype = validFiletype.test(intendedForFile)\n if (!isValidFiletype) {\n issues.push(\n new Issue({\n file: file,\n code: 37,\n reason: `Invalid filetype: IntendedFor should point to the .nii[.gz] files.`,\n evidence: intendedForFile,\n }),\n )\n }\n}\n\nfunction isMonotonicIncreasingArray(A) {\n let isInc = false\n for (let i = 1; i < A.length; i++) {\n if (A[i] > A[i - 1]) {\n isInc = true\n } else {\n return false\n }\n }\n return isInc\n}\n" }, { "alpha_fraction": 0.8046647310256958, "alphanum_fraction": 0.8046647310256958, "avg_line_length": 113.33333587646484, "blob_id": "3edd31478428a983eced47a51c93ab45d1a67795", "content_id": "d499a615921e8916090c6315b70eb51a7790f5e8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 343, "license_type": "permissive", "max_line_length": 233, "num_lines": 3, "path": "/bids-validator/src/deps/README.md", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "Deno convention is to place dependencies in an module like this to keep them easy to update. 
Each dependency should be its own file to prevent compilation of unused dependencies if they are outside of the tree for a given entrypoint.\n\nSee [\"Manage Dependencies\"](https://deno.land/manual@main/examples/manage_dependencies) in the Deno manual.\n" }, { "alpha_fraction": 0.6161971688270569, "alphanum_fraction": 0.6267605423927307, "avg_line_length": 28.235294342041016, "blob_id": "833a38642010471f7028c3a109860f1e36d06942", "content_id": "e5fc80ef4fb609bbbf2547fd9db95d4b5272d7f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1988, "license_type": "permissive", "max_line_length": 114, "num_lines": 68, "path": "/bids-validator/build.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "#!/bin/env -S deno run --allow-read --allow-write --allow-env --allow-net --allow-run\n/**\n * Build the schema based validator for distribution (web and npm), targets browser compatible ESM\n *\n * If you would like to use this package in a Node.js project, you'll need to use native ESM or a transform system\n */\nimport * as esbuild from 'https://deno.land/x/[email protected]/mod.js'\nimport { parse } from 'https://deno.land/[email protected]/flags/mod.ts'\n\nconst MAIN_ENTRY = 'src/main.ts'\nconst CLI_ENTRY = 'src/bids-validator.ts'\n\nconst httpPlugin = {\n name: 'http',\n setup(build: esbuild.PluginBuild) {\n build.onResolve({ filter: /^https?:\\/\\// }, (args) => ({\n path: args.path,\n namespace: 'http-url',\n }))\n\n build.onResolve({ filter: /.*/, namespace: 'http-url' }, (args) => ({\n path: new URL(args.path, args.importer).toString(),\n namespace: 'http-url',\n }))\n\n build.onLoad({ filter: /.*/, namespace: 'http-url' }, async (args) => {\n const request = await fetch(args.path)\n const contents = await request.text()\n if (args.path.endsWith('.ts')) {\n return { contents, loader: 'ts' }\n } else if (args.path.endsWith('.json')) {\n return { contents, loader: 'json' }\n } else {\n return { contents, loader: 'js' }\n }\n })\n },\n}\n\nconst flags = parse(Deno.args, {\n boolean: ['minify'],\n default: { minify: false },\n})\n\nconst result = await esbuild.build({\n format: 'esm',\n entryPoints: [MAIN_ENTRY, CLI_ENTRY],\n bundle: true,\n outdir: 'dist/validator',\n minify: flags.minify,\n target: ['chrome109', 'firefox109', 'safari16'],\n plugins: [httpPlugin],\n allowOverwrite: true,\n sourcemap: flags.minify ? 
false : 'inline',\n})\n\nif (result.warnings.length > 0) {\n console.warn('Build reported warnings')\n console.dir(result.warnings)\n}\n\nif (result.errors.length === 0) {\n Deno.exit(0)\n} else {\n console.error(`An issue occurred building '${MAIN_ENTRY}'`)\n console.dir(result.errors)\n Deno.exit(1)\n}\n" }, { "alpha_fraction": 0.6762820482254028, "alphanum_fraction": 0.6762820482254028, "avg_line_length": 31.842105865478516, "blob_id": "fd2747d750d5a1d3fd1ec5731a7adacb7dae44da", "content_id": "2452cb7f051a05edccb602713566c491ac386a5e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 1248, "license_type": "permissive", "max_line_length": 120, "num_lines": 38, "path": "/bids-validator/src/setup/loadSchema.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { Schema } from '../types/schema.ts'\nimport { objectPathHandler } from '../utils/objectPathHandler.ts'\nimport * as schemaDefault from 'https://bids-specification.readthedocs.io/en/latest/schema.json' assert { type: 'json' }\n\n/**\n * Load the schema from the specification\n *\n * version is ignored when the network cannot be accessed\n */\nexport async function loadSchema(version = 'latest'): Promise<Schema> {\n const versionRegex = /^v\\d/\n let schemaUrl = version\n const bidsSchema = Deno.env.get('BIDS_SCHEMA')\n if (bidsSchema !== undefined) {\n schemaUrl = bidsSchema\n } else if (version === 'latest' || versionRegex.test(version)) {\n schemaUrl = `https://bids-specification.readthedocs.io/en/${version}/schema.json`\n }\n try {\n const schemaModule = await import(schemaUrl, {\n assert: { type: 'json' },\n })\n return new Proxy(\n schemaModule.default as object,\n objectPathHandler,\n ) as Schema\n } catch (error) {\n // No network access or other errors\n console.error(error)\n console.error(\n `Warning, could not load schema from ${schemaUrl}, falling back to internal version`,\n )\n return new Proxy(\n schemaDefault.default as object,\n objectPathHandler,\n ) as Schema\n }\n}\n" }, { "alpha_fraction": 0.5300291776657104, "alphanum_fraction": 0.5309038162231445, "avg_line_length": 25.796875, "blob_id": "1ebfae419294acfa10b3ab308fb521af7066e107", "content_id": "e659de0514f0305bd4e2697bfca400e6b0692a22", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3430, "license_type": "permissive", "max_line_length": 86, "num_lines": 128, "path": "/bids-validator-web/components/App.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import React from 'react'\nimport bowser from 'bowser'\nimport Issues from '../components/Issues'\nimport BrowserWarning from './BrowserWarning'\nimport Validate from '../components/Validate'\nimport validate from '../../bids-validator'\nimport validatorPackageJson from 'bids-validator/package.json' assert { type: 'json' }\nconst version = validatorPackageJson.version\n\n// component setup -----------------------------------------------------------\nconst initState = () => ({\n dirName: '',\n list: {},\n nameError: null,\n projectId: '',\n refs: {},\n errors: [],\n warnings: [],\n summary: null,\n status: '',\n uploadStatus: '',\n options: {\n ignoreWarnings: false,\n ignoreNiftiHeaders: false,\n ignoreSubjectConsistency: false,\n },\n})\n\nexport default class App extends React.Component {\n constructor() {\n super()\n this.state = initState()\n this.validate = this._validate.bind(this)\n this.reset = 
this._reset.bind(this)\n }\n\n _validate(selectedFiles) {\n const dirName = selectedFiles.list[0].webkitRelativePath.split('/')[0]\n const defaultConfig = `${dirName}/.bids-validator-config.json`\n this.setState({\n status: 'validating',\n showIssues: true,\n activeKey: 3,\n dirName,\n })\n return validate.BIDS(\n selectedFiles.list,\n {\n verbose: true,\n ...this.state.options,\n config: defaultConfig,\n },\n (issues, summary) => {\n if (issues === 'Invalid') {\n return this.setState({\n errors: 'Invalid',\n summary,\n status: 'validated',\n })\n } else {\n return this.setState({\n errors: issues.errors ? issues.errors : [],\n warnings: issues.warnings ? issues.warnings : [],\n summary,\n status: 'validated',\n })\n }\n },\n )\n }\n\n _reset() {\n this.setState(initState())\n }\n\n handleOptionToggle = (e) => {\n const { name } = e.target\n this.setState((prevState) => ({\n ...prevState,\n options: {\n ...prevState.options,\n [name]: !prevState.options[name],\n },\n }))\n }\n\n render() {\n const browserUnsupported =\n !bowser.chrome &&\n !bowser.chromium &&\n !bowser.firefox &&\n typeof window !== 'undefined'\n return (\n <div id=\"root\">\n <nav className=\"navbar navbar-dark bg-dark fixed-top\">\n <div className=\"container\">\n <div className=\"navbar-header\">\n <a\n className=\"navbar-brand\"\n href=\"https://www.npmjs.com/package/bids-validator\"\n target=\"_blank\">\n BIDS Validator v{version}\n </a>\n </div>\n </div>\n </nav>\n <div className=\"container page-wrapper\">\n <div className=\"browser-warning\">\n {browserUnsupported ? <BrowserWarning /> : null}\n </div>\n <div className=\"validator\">\n {!browserUnsupported ? (\n <Validate\n loading={this.state.status === 'validating'}\n options={this.state.options}\n onChange={this.validate}\n handleOptionToggle={this.handleOptionToggle}\n />\n ) : null}\n </div>\n {this.state.status === 'validated' ? 
(\n <Issues reset={this.reset} {...this.state} />\n ) : null}\n </div>\n </div>\n )\n }\n}\n" }, { "alpha_fraction": 0.6585366129875183, "alphanum_fraction": 0.707317054271698, "avg_line_length": 81, "blob_id": "7e4e5ec84de709c45800bffa07d0ff7460f90105", "content_id": "21e4a09436a725070791e6324a30c256508e8ed0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 82, "license_type": "permissive", "max_line_length": 81, "num_lines": 1, "path": "/bids-validator/src/deps/yargs.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export { default as yargs } from 'https://deno.land/x/[email protected]/deno.ts'\n" }, { "alpha_fraction": 0.713394045829773, "alphanum_fraction": 0.7154746651649475, "avg_line_length": 27.481481552124023, "blob_id": "6eac0e6cff1ceb31cb61825ecdaea83cc1ed5f53", "content_id": "34383e1cc27f8f1a31943c89dbb7fc637e0a9f42", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3845, "license_type": "permissive", "max_line_length": 80, "num_lines": 135, "path": "/bids-validator/validators/hed.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import hedValidator from 'hed-validator'\nimport cloneDeep from 'lodash/cloneDeep'\nimport intersection from 'lodash/intersection'\nimport utils from '../utils'\n\nconst Issue = utils.issues.Issue\n\nexport default function checkHedStrings(tsvs, jsonContents, jsonFiles, dir) {\n const tsvData = constructTsvData(tsvs, jsonContents)\n const sidecarData = constructSidecarData(tsvData, jsonContents, jsonFiles)\n const hedDataExists = detectHed(tsvData, sidecarData)\n if (!hedDataExists) {\n return Promise.resolve([])\n }\n\n const datasetDescription = jsonContents['/dataset_description.json']\n const datasetDescriptionData = new hedValidator.validator.BidsJsonFile(\n '/dataset_description.json',\n cloneDeep(datasetDescription),\n getSidecarFileObject('/dataset_description.json', jsonFiles),\n )\n const dataset = new hedValidator.validator.BidsDataset(\n tsvData,\n sidecarData,\n datasetDescriptionData,\n dir,\n )\n // New stuff end does parseHedVersion need to be called anymore?\n const schemaDefinitionIssues = parseHedVersion(jsonContents)\n try {\n return hedValidator.validator\n .validateBidsDataset(dataset)\n .then((hedValidationIssues) => {\n return schemaDefinitionIssues.concat(\n convertHedIssuesToBidsIssues(hedValidationIssues),\n )\n })\n } catch (error) {\n const issues = schemaDefinitionIssues.concat(\n internalHedValidatorIssue(error),\n )\n return Promise.resolve(issues)\n }\n}\n\nfunction constructTsvData(tsvFiles, jsonContents) {\n return tsvFiles.map((tsvFile) => {\n const potentialSidecars = utils.files.potentialLocations(\n tsvFile.file.relativePath.replace('.tsv', '.json'),\n )\n const mergedDictionary = utils.files.generateMergedSidecarDict(\n potentialSidecars,\n jsonContents,\n )\n let TsvFileClass\n if (tsvFile.file.relativePath.endsWith('_events.tsv')) {\n TsvFileClass = hedValidator.bids.BidsEventFile\n } else {\n TsvFileClass = hedValidator.bids.BidsTabularFile\n }\n return new TsvFileClass(\n tsvFile.path,\n potentialSidecars,\n mergedDictionary,\n tsvFile.contents,\n tsvFile.file,\n )\n })\n}\n\nfunction constructSidecarData(tsvData, jsonContents, jsonFiles) {\n const actualSidecarNames = Object.keys(jsonContents)\n const potentialSidecars = []\n for (const tsvFileData of tsvData) {\n 
potentialSidecars.push(...tsvFileData.potentialSidecars)\n }\n const actualEventSidecars = intersection(\n actualSidecarNames,\n potentialSidecars,\n )\n return actualEventSidecars.map((sidecarName) => {\n return new hedValidator.bids.BidsSidecar(\n sidecarName,\n cloneDeep(jsonContents[sidecarName]),\n getSidecarFileObject(sidecarName, jsonFiles),\n )\n })\n}\n\nfunction getSidecarFileObject(sidecarName, jsonFiles) {\n return cloneDeep(\n jsonFiles.filter((file) => {\n return file.relativePath === sidecarName\n })[0],\n )\n}\n\nfunction detectHed(tsvData, sidecarData) {\n return (\n sidecarData.some((sidecarFileData) => {\n return Object.values(sidecarFileData.sidecarData).some(sidecarValueHasHed)\n }) ||\n tsvData.some((tsvFileData) => {\n return tsvFileData.parsedTsv.headers.indexOf('HED') !== -1\n })\n )\n}\n\nfunction sidecarValueHasHed(sidecarValue) {\n return (\n sidecarValue !== null &&\n typeof sidecarValue === 'object' &&\n sidecarValue.HED !== undefined\n )\n}\n\nfunction parseHedVersion(jsonContents) {\n const datasetDescription = jsonContents['/dataset_description.json']\n\n if (!(datasetDescription && datasetDescription.HEDVersion)) {\n return [new Issue({ code: 109 })]\n } else {\n return []\n }\n}\n\nfunction internalHedValidatorIssue(error) {\n return Issue.errorToIssue(error, 107)\n}\n\nfunction convertHedIssuesToBidsIssues(hedIssues) {\n return hedIssues.map((hedIssue) => {\n return new Issue(hedIssue)\n })\n}\n" }, { "alpha_fraction": 0.5303326845169067, "alphanum_fraction": 0.5492498278617859, "avg_line_length": 28.480770111083984, "blob_id": "17029f2d6138acd33709edb3989e300522461a28", "content_id": "fe8b4dc54962d80f9fb300dfd176219ac8713694", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1533, "license_type": "permissive", "max_line_length": 88, "num_lines": 52, "path": "/bids-validator/utils/files/illegalCharacterTest.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import Issue from '../../utils/issues'\n\nconst re = {\n task_re:\n /sub-(.*?)_task-[a-zA-Z0-9]*[_-][a-zA-Z0-9]*(?:_acq-[a-zA-Z0-9-]*)?(?:_run-\\d+)?_/g,\n acq_re:\n /sub-(.*?)(_task-\\w+.\\w+)?(_acq-[a-zA-Z0-9]*[_-][a-zA-Z0-9]*)(?:_run-\\d+)?_/g,\n sub_re: /sub-[a-zA-Z0-9]*[_-][a-zA-Z0-9]*_/g, // illegal character in sub\n ses_re: /ses-[a-zA-Z0-9]*[_-][a-zA-Z0-9]*?_(.*?)/g, //illegal character in ses\n}\n\nconst illegalchar_regex_list = [\n [re.task_re, 58, 'task name contains illegal character:'],\n [re.acq_re, 59, 'acq name contains illegal character:'],\n [re.sub_re, 62, 'sub name contains illegal character:'],\n [re.ses_re, 63, 'ses name contains illegal character:'],\n]\n\nconst illegalCharacterTest = (fileList) => {\n const issues = []\n const fileKeys = Object.keys(fileList)\n fileKeys.forEach((key) => {\n const file = fileList[key]\n const completename = file.relativePath\n if (\n !(\n completename.startsWith('/derivatives') ||\n completename.startsWith('/code') ||\n completename.startsWith('/sourcedata')\n )\n ) {\n illegalchar_regex_list.map((regex) => {\n const err_regex = regex[0]\n const err_code = regex[1]\n const err_evidence = regex[2]\n\n if (err_regex.exec(completename)) {\n issues.push(\n new Issue({\n file: file,\n code: err_code,\n evidence: err_evidence + completename,\n }),\n )\n }\n })\n }\n })\n return issues\n}\n\nexport default illegalCharacterTest\n" }, { "alpha_fraction": 0.6105417013168335, "alphanum_fraction": 0.617862343788147, "avg_line_length": 22.55172348022461, 
"blob_id": "2f27b6cccdea9bca52f3255fb652ba4a87803e76", "content_id": "8a092f01439b695caa6de0a291f36e4b86bd9e01", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 683, "license_type": "permissive", "max_line_length": 70, "num_lines": 29, "path": "/bids-validator/validators/bids/quickTest.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Quick Test\n *\n * A quick test to see if it could be a BIDS\n * dataset based on structure/naming. If it\n * could be it will trigger the full validation\n * otherwise it will throw a callback with a\n * generic error.\n */\nconst quickTest = (fileList) => {\n const keys = Object.keys(fileList)\n const couldBeBIDS = keys.some((key) => {\n const file = fileList[key]\n let path = file.relativePath\n if (path) {\n path = path.split('/')\n path = path.reverse()\n\n let pathIsSesOrSub =\n path[2] &&\n (path[2].indexOf('ses-') == 0 || path[2].indexOf('sub-') == 0)\n\n return pathIsSesOrSub\n }\n })\n return couldBeBIDS\n}\n\nexport default quickTest\n" }, { "alpha_fraction": 0.7176913619041443, "alphanum_fraction": 0.7176913619041443, "avg_line_length": 19.973684310913086, "blob_id": "08406906cdae2e9f5f3e577a13bce094194047a4", "content_id": "fb29857771dadf8108094392f2952fed7cbb6481", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 797, "license_type": "permissive", "max_line_length": 58, "num_lines": 38, "path": "/bids-validator/src/types/validation-result.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { DatasetIssues } from '../issues/datasetIssues.ts'\n\nexport interface SubjectMetadata {\n participantId: string\n age: number\n sex: string\n}\n/*\n BodyPart: {},\n ScannerManufacturer: {},\n ScannerManufacturersModelName: {},\n TracerName: {},\n TracerRadionuclide: {},\n*/\n\nexport interface SummaryOutput {\n sessions: string[]\n subjects: string[]\n subjectMetadata: SubjectMetadata[]\n tasks: string[]\n modalities: string[]\n secondaryModalities: string[]\n totalFiles: number\n size: number\n dataProcessed: boolean\n pet: Record<string, any>\n datatypes: string[]\n schemaVersion: string\n}\n\n/**\n * The output of a validation run\n */\nexport interface ValidationResult {\n issues: DatasetIssues\n summary: SummaryOutput\n derivativesSummary?: Record<string, ValidationResult>\n}\n" }, { "alpha_fraction": 0.5591892600059509, "alphanum_fraction": 0.5622551441192627, "avg_line_length": 33.13372039794922, "blob_id": "cdc2de6afe11dbaf57613fde9185726a891e8d5d", "content_id": "decc21412bcb5929ed5bfcd0967d96f6f37334d5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5871, "license_type": "permissive", "max_line_length": 79, "num_lines": 172, "path": "/bids-validator/bids_validator/bids_validator.py", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "\"\"\"Validation class for BIDS projects.\"\"\"\nimport re\nimport os\nimport json\n\n\nclass BIDSValidator():\n \"\"\"Object for BIDS (Brain Imaging Data Structure) verification.\n\n The main method of this class is `is_bids()`. You should use it for\n checking whether a file path is compatible with BIDS.\n\n \"\"\"\n\n def __init__(self, index_associated=True):\n \"\"\"Initialize BIDSValidator object.\n\n Parameters\n ----------\n index_associated : bool\n Specifies if an associated data should be checked. 
If it is true\n then any file paths in directories `code/`, `derivatives/`,\n `sourcedata/` and `stimuli/` will pass the validation, else they\n won't. Defaults to True.\n\n \"\"\"\n self.dir_rules = os.path.join(os.path.dirname(__file__)) + \"/rules/\"\n self.index_associated = index_associated\n\n def is_bids(self, path):\n \"\"\"Check if file path adheres to BIDS.\n\n Main method of the validator. uses other class methods for checking\n different aspects of the file path.\n\n Parameters\n ----------\n path : str\n Path of a file to be checked. Must be relative to root of a BIDS\n dataset.\n\n Notes\n -----\n When you test a file path, make sure that the path is relative to the\n root of the BIDS dataset the file is part of. That is, as soon as the\n file path contains parts outside of the BIDS dataset, the validation\n will fail. For example \"home/username/my_dataset/participants.tsv\" will\n fail, although \"participants.tsv\" is a valid BIDS file.\n\n Examples\n --------\n >>> from bids_validator import BIDSValidator\n >>> validator = BIDSValidator()\n >>> filepaths = [\"/sub-01/anat/sub-01_rec-CSD_T1w.nii.gz\",\n ... \"/sub-01/anat/sub-01_acq-23_rec-CSD_T1w.exe\", # wrong extension\n ... \"home/username/my_dataset/participants.tsv\", # not relative to root\n ... \"/participants.tsv\"]\n >>> for filepath in filepaths:\n ... print(validator.is_bids(filepath))\n True\n False\n False\n True\n\n \"\"\"\n conditions = []\n\n conditions.append(self.is_top_level(path))\n conditions.append(self.is_associated_data(path))\n conditions.append(self.is_session_level(path))\n conditions.append(self.is_subject_level(path))\n conditions.append(self.is_phenotypic(path))\n conditions.append(self.is_file(path))\n\n return (any(conditions))\n\n def is_top_level(self, path):\n \"\"\"Check if the file has appropriate name for a top-level file.\"\"\"\n regexps = self.get_regular_expressions(self.dir_rules +\n 'top_level_rules.json')\n\n conditions = [False if re.compile(x).search(path) is None else True for\n x in regexps]\n\n return (any(conditions))\n\n def is_associated_data(self, path):\n \"\"\"Check if file is appropriate associated data.\"\"\"\n if not self.index_associated:\n return False\n\n regexps = self.get_regular_expressions(self.dir_rules +\n 'associated_data_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None) for\n x in regexps]\n\n return any(conditions)\n\n def is_session_level(self, path):\n \"\"\"Check if the file has appropriate name for a session level.\"\"\"\n regexps = self.get_regular_expressions(self.dir_rules +\n 'session_level_rules.json')\n\n conditions = [self.conditional_match(x, path) for x in regexps]\n\n return (any(conditions))\n\n def is_subject_level(self, path):\n \"\"\"Check if the file has appropriate name for a subject level.\"\"\"\n regexps = self.get_regular_expressions(self.dir_rules +\n 'subject_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None) for\n x in regexps]\n\n return (any(conditions))\n\n def is_phenotypic(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions(self.dir_rules +\n 'phenotypic_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None) for\n x in regexps]\n\n return (any(conditions))\n\n def is_file(self, path):\n \"\"\"Check if file is phenotypic data.\"\"\"\n regexps = self.get_regular_expressions(self.dir_rules +\n 'file_level_rules.json')\n\n conditions = [(re.compile(x).search(path) is not None) for\n x in regexps]\n\n return 
(any(conditions))\n\n def get_regular_expressions(self, file_name):\n \"\"\"Read regular expressions from a file.\"\"\"\n regexps = []\n\n with open(file_name) as fin:\n rules = json.load(fin)\n\n for key in list(rules.keys()):\n rule = rules[key]\n\n regexp = rule[\"regexp\"]\n\n if \"tokens\" in rule:\n tokens = rule[\"tokens\"]\n\n for token in list(tokens):\n regexp = regexp.replace(token, \"|\".join(tokens[token]))\n\n regexps.append(regexp)\n\n return regexps\n\n def conditional_match(self, expression, path):\n \"\"\"Find conditional match.\"\"\"\n match = re.compile(expression).findall(path)\n match = match[0] if len(match) >= 1 else False\n # adapted from JS code and JS does not support conditional groups\n if (match):\n if ((match[1] == match[2][1:]) | (not match[1])):\n return True\n else:\n return False\n else:\n return False\n" }, { "alpha_fraction": 0.5161506533622742, "alphanum_fraction": 0.5304323434829712, "avg_line_length": 27.0078125, "blob_id": "a771fd1b1530d6cc9cd6a6998271b830d190bbc5", "content_id": "f4ca95b7582518162a106369704bdc1743c24e40", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 17925, "license_type": "permissive", "max_line_length": 93, "num_lines": 640, "path": "/bids-validator/validators/tsv/tsv.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import Issue from '../../utils/issues/issue'\nimport checkAcqTimeFormat from './checkAcqTimeFormat'\nimport checkAge89 from './checkAge89'\nimport checkHeaders from './checkHeaders'\nimport checkStatusCol from './checkStatusCol'\nimport checkTypecol from './checkTypeCol'\nimport parseTSV from './tsvParser'\nimport checkMotionComponent from './checkMotionComponent'\nvar path = require('path')\n\n/**\n * Format TSV headers for evidence string\n * @param {Array[string]} headers\n * @returns {string}\n */\nexport const headersEvidence = (headers) =>\n `Column headers: ${headers.join(', ')}`\n\n/**\n * Format TSV filename for evidence string\n * @param {Array[string]} filename\n * @returns {string}\n */\nconst filenameEvidence = (filename) => `Filename: ${filename}`\n\n/**\n * TSV\n *\n * Takes a TSV file as a string and a callback\n * as arguments. 
And callsback with any issues\n * it finds while validating against the BIDS\n * specification.\n */\n\nconst TSV = (file, contents, fileList, callback) => {\n const issues = []\n const stimPaths = []\n if (contents.includes('\\r') && !contents.includes('\\n')) {\n issues.push(\n new Issue({\n file: file,\n evidence: contents,\n code: 70,\n }),\n )\n callback(issues, null)\n return\n }\n\n // TSV Parser -----------------------------------------------------------\n const { headers, rows } = parseTSV(contents)\n\n // generic checks -----------------------------------------------------------\n let columnMismatch = false\n let emptyCells = false\n let NACells = false\n\n checkHeaders(headers, file, issues)\n\n for (let i = 1; i < rows.length; i++) {\n const values = rows[i]\n const evidence = `row ${i}: ${values.join('\\t')}`\n if (values.length === 1 && /^\\s*$/.test(values[0])) continue\n if (columnMismatch && emptyCells && NACells) break\n // check for different length rows\n if (values.length !== headers.length && !columnMismatch) {\n columnMismatch = true\n issues.push(\n new Issue({\n file: file,\n evidence,\n line: i + 1,\n code: 22,\n }),\n )\n }\n // iterate values\n for (let j = 0; j < values.length; j++) {\n const value = values[j]\n if (columnMismatch && emptyCells && NACells) break\n if (value === '' && !emptyCells) {\n emptyCells = true\n // empty cell should raise an error\n issues.push(\n new Issue({\n file: file,\n evidence,\n line: i + 1,\n reason: 'Missing value at column # ' + (j + 1),\n code: 23,\n }),\n )\n } else if (\n (value === 'NA' ||\n value === 'na' ||\n value === 'nan' ||\n value === 'NaN') &&\n !NACells\n ) {\n NACells = true\n // check if missing value is properly labeled as 'n/a'\n issues.push(\n new Issue({\n file: file,\n evidence,\n line: i + 1,\n reason: 'Missing value at column # ' + (j + 1),\n code: 24,\n }),\n )\n }\n }\n }\n\n // specific file checks -----------------------------------------------------\n const checkheader = function checkheader(\n headername,\n idx,\n file,\n missingCode,\n orderCode = null,\n ) {\n let code = missingCode\n if (headers.includes(headername) && orderCode) {\n code = orderCode\n }\n\n if (headers[idx] !== headername) {\n issues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n character: rows[0].indexOf(headers[idx]),\n code: code,\n }),\n )\n }\n }\n\n // events.tsv\n if (file.name.endsWith('_events.tsv')) {\n if (headers.length == 0 || headers[0] !== 'onset') {\n issues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 20,\n }),\n )\n }\n if (headers.length < 2 || headers[1].trim() !== 'duration') {\n issues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 21,\n }),\n )\n }\n\n // create full dataset path list\n const pathList = []\n for (let f in fileList) {\n if (fileList.hasOwnProperty(f)) {\n pathList.push(fileList[f].relativePath)\n }\n }\n\n // check for stimuli file\n const stimFiles = []\n if (headers.indexOf('stim_file') > -1) {\n for (let k = 0; k < rows.length; k++) {\n const stimFile = rows[k][headers.indexOf('stim_file')]\n const stimPath = '/stimuli/' + stimFile\n if (\n stimFile &&\n stimFile !== 'n/a' &&\n stimFile !== 'stim_file' &&\n stimFiles.indexOf(stimFile) == -1\n ) {\n stimFiles.push(stimFile)\n stimPaths.push(stimPath)\n if (pathList.indexOf(stimPath) == -1) {\n issues.push(\n new Issue({\n file: file,\n evidence: stimFile,\n reason:\n 'A stimulus file (' +\n stimFile +\n ') was 
declared but not found in /stimuli.',\n line: k + 1,\n character: rows[k].indexOf(stimFile),\n code: 52,\n }),\n )\n }\n }\n }\n }\n }\n\n // participants.tsv\n let participants = null\n if (\n file.name === 'participants.tsv' ||\n file.relativePath.includes('phenotype/')\n ) {\n const participantIdColumn = headers.indexOf('participant_id')\n\n // if the participant_id column is missing, an error\n // will be raised\n if (participantIdColumn === -1) {\n issues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 48,\n }),\n )\n } else {\n // otherwise, the participants should comprise of\n // sub-<subject_id> and one subject per row\n participants = []\n for (let l = 1; l < rows.length; l++) {\n const row = rows[l]\n // skip empty rows\n if (!row || /^\\s*$/.test(row)) {\n continue\n }\n\n // check if any incorrect patterns in participant_id column\n if (!row[participantIdColumn].startsWith('sub-')) {\n issues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n reason:\n 'Participant_id column should be named ' +\n 'as sub-<subject_id>.',\n line: l,\n code: 212,\n }),\n )\n }\n\n // obtain a list of the subject IDs in the participants.tsv file\n const participant = row[participantIdColumn].replace('sub-', '')\n if (participant == 'emptyroom') {\n continue\n }\n participants.push(participant)\n }\n }\n }\n\n // samples.tsv\n let samples = null\n if (file.name === 'samples.tsv') {\n const sampleIssues = []\n const sampleIdColumnValues = []\n const participantIdColumnValues = []\n const sampleIdColumn = headers.indexOf('sample_id')\n const participantIdColumn = headers.indexOf('participant_id')\n const sampleTypeColumn = headers.indexOf('sample_type')\n\n // if the sample_id column is missing, an error\n // will be raised\n if (sampleIdColumn === -1) {\n sampleIssues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 216,\n }),\n )\n }\n // if the participant_id column is missing, an error\n // will be raised\n if (participantIdColumn === -1) {\n sampleIssues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 217,\n }),\n )\n }\n // if the sample_type column is missing, an error\n // will be raised\n if (sampleTypeColumn === -1) {\n sampleIssues.push(\n new Issue({\n file: file,\n evidence: headersEvidence(headers),\n line: 1,\n code: 218,\n }),\n )\n }\n // Fold sampleIssues into main issue array, only needed it for this\n // conditional.\n issues.push(...sampleIssues)\n if (sampleIssues.length === 0) {\n // otherwise, the samples should comprise of\n // sample-<sample_id> and one sample per row\n samples = []\n for (let l = 1; l < rows.length; l++) {\n const row = rows[l]\n // skip empty rows\n if (!row || /^\\s*$/.test(row)) {\n continue\n }\n sampleIdColumnValues.push(row[sampleIdColumn])\n\n // check if any incorrect patterns in sample_id column\n if (!row[sampleIdColumn].startsWith('sample-')) {\n issues.push(\n new Issue({\n file: file,\n evidence: row[sampleIdColumn],\n reason:\n 'sample_id column should be named ' + 'as sample-<sample_id>.',\n line: l,\n code: 215,\n }),\n )\n }\n }\n // The participants should comprise of\n // sub-<subject_id> and one subject per row\n participants = []\n for (let l = 1; l < rows.length; l++) {\n const row = rows[l]\n // skip empty rows\n if (!row || /^\\s*$/.test(row)) {\n continue\n }\n participantIdColumnValues.push(row[participantIdColumn])\n\n // check if any incorrect patterns in participant_id column\n if 
(!row[participantIdColumn].startsWith('sub-')) {\n issues.push(\n new Issue({\n file: file,\n evidence: row[participantIdColumn],\n reason:\n 'Participant_id column should be named ' +\n 'as sub-<subject_id>.',\n line: l,\n code: 212,\n }),\n )\n }\n\n // obtain a list of the sample IDs in the samples.tsv file\n const sample = row[sampleIdColumn].replace('sample-', '')\n if (sample == 'emptyroom') {\n continue\n }\n samples.push(sample)\n }\n\n // check if a sample from same subject is described by one and only one row\n let samplePartIdsSet = new Set()\n for (let r = 0; r < rows.length - 1; r++) {\n let uniqueString = sampleIdColumnValues[r].concat(\n participantIdColumnValues[r],\n )\n // check if SampleId Have Duplicate\n if (samplePartIdsSet.has(uniqueString)) {\n issues.push(\n new Issue({\n file: file,\n evidence: sampleIdColumnValues,\n reason:\n 'Each sample from a same subject MUST be described by one and only one row.',\n line: 1,\n code: 220,\n }),\n )\n break\n } else samplePartIdsSet.add(uniqueString)\n }\n }\n\n if (sampleTypeColumn !== -1) {\n // check if any incorrect patterns in sample_type column\n const validSampleTypes = [\n 'cell line',\n 'in vitro differentiated cells',\n 'primary cell',\n 'cell-free sample',\n 'cloning host',\n 'tissue',\n 'whole organisms',\n 'organoid',\n 'technical sample',\n ]\n for (let c = 1; c < rows.length; c++) {\n const row = rows[c]\n if (!validSampleTypes.includes(row[sampleTypeColumn])) {\n issues.push(\n new Issue({\n file: file,\n evidence: row[sampleTypeColumn],\n reason: \"sample_type can't be any value.\",\n line: c + 1,\n code: 219,\n }),\n )\n }\n }\n }\n }\n\n if (\n file.relativePath.includes('/meg/') &&\n file.name.endsWith('_channels.tsv')\n ) {\n checkheader('name', 0, file, 71, 230)\n checkheader('type', 1, file, 71, 230)\n checkheader('units', 2, file, 71, 230)\n checkStatusCol(rows, file, issues)\n checkTypecol(rows, file, issues)\n }\n\n if (\n file.relativePath.includes('/eeg/') &&\n file.name.endsWith('_channels.tsv')\n ) {\n checkheader('name', 0, file, 71, 230)\n checkheader('type', 1, file, 71, 230)\n checkheader('units', 2, file, 71, 230)\n checkStatusCol(rows, file, issues)\n checkTypecol(rows, file, issues)\n }\n\n if (\n file.relativePath.includes('/ieeg/') &&\n file.name.endsWith('_channels.tsv')\n ) {\n checkheader('name', 0, file, 72, 229)\n checkheader('type', 1, file, 72, 229)\n checkheader('units', 2, file, 72, 229)\n checkheader('low_cutoff', 3, file, 72, 229)\n checkheader('high_cutoff', 4, file, 72, 229)\n checkStatusCol(rows, file, issues)\n checkTypecol(rows, file, issues)\n }\n\n if (\n file.relativePath.includes('/motion/') &&\n file.name.endsWith('_channels.tsv')\n ) {\n const required = ['component', 'name', 'tracked_point', 'type', 'units']\n const missing = required.filter((x) => !headers.includes(x))\n if (missing.length) {\n issues.push(\n new Issue({\n line: 1,\n file: file,\n code: 129,\n evidence: `Missing Columns: ${missing.join(', ')}`,\n }),\n )\n }\n checkStatusCol(rows, file, issues)\n checkTypecol(rows, file, issues)\n checkMotionComponent(rows, file, issues)\n }\n if (\n file.relativePath.includes('/nirs/') &&\n file.name.endsWith('_channels.tsv')\n ) {\n checkheader('name', 0, file, 234)\n checkheader('type', 1, file, 234)\n checkheader('source', 2, file, 234)\n checkheader('detector', 3, file, 234)\n checkheader('wavelength_nominal', 4, file, 234)\n checkheader('units', 5, file, 234)\n checkStatusCol(rows, file, issues)\n checkTypecol(rows, file, issues)\n }\n\n // electrodes.tsv\n 
if (\n file.relativePath.includes('/eeg/') &&\n file.name.endsWith('_electrodes.tsv')\n ) {\n checkheader('name', 0, file, 96)\n checkheader('x', 1, file, 96)\n checkheader('y', 2, file, 96)\n checkheader('z', 3, file, 96)\n }\n\n if (\n file.relativePath.includes('/ieeg/') &&\n file.name.endsWith('_electrodes.tsv')\n ) {\n checkheader('name', 0, file, 73)\n checkheader('x', 1, file, 73)\n checkheader('y', 2, file, 73)\n checkheader('z', 3, file, 73)\n checkheader('size', 4, file, 73)\n }\n\n if (\n file.relativePath.includes('/nirs/') &&\n file.name.endsWith('_optodes.tsv')\n ) {\n checkheader('name', 0, file, 233)\n checkheader('type', 1, file, 233)\n checkheader('x', 2, file, 233)\n checkheader('y', 3, file, 233)\n checkheader('z', 4, file, 233)\n }\n\n // blood.tsv\n if (file.relativePath.includes('/pet/') && file.name.endsWith('_blood.tsv')) {\n // Validate fields here\n checkheader('time', 0, file, 126)\n }\n\n // check for valid SI units\n /*\n * Commenting out call to validation until it is inline with spec:\n * https://github.com/bids-standard/bids-specification/pull/411\n if (headers.includes('units')) {\n const unitIndex = headers.indexOf('units')\n rows\n // discard headers\n .slice(1)\n // extract unit values\n .map((row, i) => ({\n unit: row[unitIndex],\n line: i + 2,\n }))\n .forEach(({ unit, line }) => {\n const { isValid, evidence } = utils.unit.validate(unit)\n if (!isValid)\n issues.push(\n new Issue({\n line,\n file,\n code: 124,\n evidence,\n }),\n )\n })\n }\n */\n\n // check partcipants.tsv for age 89+\n if (file.name === 'participants.tsv') {\n checkAge89(rows, file, issues)\n }\n\n if (file.name.endsWith('_scans.tsv')) {\n // get the directory path for the scans.tsv\n const scanDirPath = path.dirname(file.relativePath)\n\n // get the subject and session for this scans.tsv file\n const subject = file.name.split('_').slice(0, 1)\n\n // get the relative subject path\n const subRelativePath = '/' + subject\n\n // get list of file paths for this subject and session\n const pathList = []\n for (let file of Object.values(fileList)) {\n const fPath = file.relativePath\n\n // XXX: needs to be improved, since this currently allows arbitrary directory nesting\n // dataset file needs to be within the subject\n // and session directory\n if (fPath.startsWith(subRelativePath)) {\n if (fPath.includes('.ds/') || fPath.includes('_meg/')) {\n // CTF or BTI data\n const fDir = path.dirname(fPath)\n pathList.push(fDir)\n } else if (fPath.includes('_ieeg.mefd/')) {\n // MEF3 data\n const fDir = fPath.substring(0, fPath.indexOf('_ieeg.mefd/') + 10)\n if (!pathList.includes(fDir)) {\n pathList.push(fDir)\n }\n } else {\n // all other data kinds\n pathList.push(fPath)\n }\n }\n }\n\n // check _scans.tsv for column filename\n if (!(headers.indexOf('filename') > -1)) {\n issues.push(\n new Issue({\n line: 1,\n file: file,\n evidence: headersEvidence(headers),\n code: 68,\n }),\n )\n } else {\n // check scans filenames match pathList\n const filenameColumn = headers.indexOf('filename')\n for (let l = 1; l < rows.length; l++) {\n const row = rows[l]\n const scanRelativePath = row[filenameColumn]\n const scanFullPath = scanDirPath + '/' + scanRelativePath\n\n // check if scan matches full dataset path list\n if (!pathList.includes(scanFullPath)) {\n issues.push(\n new Issue({\n line: l,\n file: file,\n code: 129,\n evidence: filenameEvidence(scanFullPath),\n }),\n )\n }\n }\n }\n\n // if _scans.tsv has the acq_time header, check datetime format\n if (headers.indexOf('acq_time') > -1) {\n 
checkAcqTimeFormat(rows, file, issues)\n }\n }\n callback(issues, participants, stimPaths)\n}\nexport default TSV\n" }, { "alpha_fraction": 0.5254597067832947, "alphanum_fraction": 0.5332390666007996, "avg_line_length": 24.94495391845703, "blob_id": "a4d4bbaf071c0029f656aa4243a1e51d4ca3bbb5", "content_id": "74d7528f1f24ccc35329e2543c1aeec7f6995e59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2828, "license_type": "permissive", "max_line_length": 119, "num_lines": 109, "path": "/bids-validator/src/schema/applyRules.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// @ts-nocheck\nimport { assert, assertEquals, assertObjectMatch } from '../deps/asserts.ts'\nimport { loadSchema } from '../setup/loadSchema.ts'\nimport { applyRules, evalCheck } from './applyRules.ts'\nimport { DatasetIssues } from '../issues/datasetIssues.ts'\n\nconst ruleContextData = [\n {\n path: ['rules', 'checks', 'dwi', 'DWIVolumeCount'],\n context: {\n suffix: 'dwi',\n associations: {\n bvec: {\n n_cols: 4,\n },\n bval: {\n n_cols: 4,\n },\n },\n nifti_header: {\n dim: [0, 0, 0, 0, 4],\n },\n },\n },\n]\n\nconst schemaDefs = {\n rules: {\n checks: {\n dwi: {\n DWIVolumeCount: {\n code: 'VOLUME_COUNT_MISMATCH',\n description:\n 'The number of volumes in this scan does not match the number of volumes in the\\ncorresponding .bvec a...',\n level: 'error',\n selectors: [\n 'suffix == \"dwi\"',\n '\"bval\" in associations',\n '\"bvec\" in associations',\n ],\n checks: [\n 'associations.bval.n_cols == nifti_header.dim[4]',\n 'associations.bvec.n_cols == nifti_header.dim[4]',\n ],\n },\n },\n },\n },\n}\n\nDeno.test('evalCheck test', () => {\n ruleContextData.map((rcd) => {\n const rule = rcd.path.reduce((obj, key) => obj[key], schemaDefs)\n rule.selectors.map((selector: string) => {\n assert(evalCheck(selector, rcd.context), `${selector}, ${rcd.context}`)\n })\n rule.checks.map((check: string) => {\n assert(evalCheck(check, rcd.context), `${check}, ${rcd.context}`)\n })\n })\n})\n\nDeno.test('evalCheck ensure constructor access', () => {\n assert(\n evalCheck('foo.constructor.isArray(foo)', { foo: [1] }),\n 'can not access Array prototype via constructor',\n )\n})\n\nDeno.test('evalCheck built in apis fail', () => {\n assert(evalCheck('fetch', {}) === undefined, 'fetch in evalCheck namespace')\n})\n\nDeno.test('evalCheck ensure expression language functions work', () => {\n const context = {\n x: [1, 2, 3, 4],\n y: [1, 1, 1, 1],\n issues: new DatasetIssues(),\n }\n const rule = [\n {\n selectors: ['true'],\n checks: [\n 'intersects(x, y)',\n 'match(\"teststr\", \"est\")',\n 'type(x) == \"array\" && type(5) == \"number\"',\n 'max(x) == 4',\n 'min(x) == min(y)',\n 'length(y) == count(y, 1)',\n ],\n },\n ]\n applyRules(rule, context)\n assert(!context.issues.hasIssue({ key: 'CHECK_ERROR' }))\n})\nDeno.test(\n 'evalCheck ensure expression language will fail appropriately',\n () => {\n const context = { issues: new DatasetIssues() }\n const rule = [\n {\n selectors: ['true'],\n checks: ['length(1)'],\n },\n ]\n applyRules(rule, context)\n assert(context.issues.hasIssue({ key: 'CHECK_ERROR' }))\n },\n)\n" }, { "alpha_fraction": 0.6265895962715149, "alphanum_fraction": 0.6289017200469971, "avg_line_length": 26.03125, "blob_id": "019af6655601d7873f9eb921bc2dadef212639c5", "content_id": "8c048b9fb193018b3953252de3c756bf99b5e882", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": 
"JavaScript", "length_bytes": 1730, "license_type": "permissive", "max_line_length": 80, "num_lines": 64, "path": "/bids-validator/utils/summary/collectPetFields.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * Gets the Target, Scanner Mfg, Radiotracer, and Radionuclide from json sidecar\n * @param {*} fileList\n */\nconst collectPetFields = (jsonContentsDict) => {\n const fields = {\n BodyPart: {},\n ScannerManufacturer: {},\n ScannerManufacturersModelName: {},\n TracerName: {},\n TracerRadionuclide: {},\n }\n\n // tally up values in fields from all pet.json files\n Object.entries(jsonContentsDict).forEach(([filepath, json]) => {\n if (filepath.endsWith('pet.json')) {\n record(fields, 'ScannerManufacturer', json.Manufacturer)\n record(\n fields,\n 'ScannerManufacturersModelName',\n json.ManufacturersModelName,\n )\n record(fields, 'TracerName', json.TracerName)\n record(fields, 'TracerRadionuclide', json.TracerRadionuclide)\n if (json.BodyPart) record(fields, 'BodyPart', json.BodyPart)\n }\n })\n\n return ordered(fields)\n}\n\nconst record = (fields, field, value) => {\n if (fields[field][value]) {\n fields[field][value]++\n } else {\n fields[field][value] = 1\n }\n}\n\n/**\n * Takes each field of tallies and converts it to an ordered list (pure).\n */\nconst ordered = (fields) => {\n const orderedFields = {}\n Object.keys(fields).forEach((key) => {\n orderedFields[key] = orderedList(fields[key])\n })\n return orderedFields\n}\n\n/**\n * Given tallies = { a: 3, b: 5, c: 1 }, returns ['b', 'a', 'c']\n * @param {object} tallies\n * @returns {string[]}\n */\nexport const orderedList = (tallies) =>\n Object.keys(tallies)\n // convert object to list of key/value pairs\n .map((key) => ({ key, count: tallies[key] }))\n // sort by count, greatest to least\n .sort(({ count: a }, { count: b }) => b - a)\n .map(({ key }) => key)\n\nexport default collectPetFields\n" }, { "alpha_fraction": 0.502808153629303, "alphanum_fraction": 0.5063552856445312, "avg_line_length": 25.22480583190918, "blob_id": "b9b4f05ac8693bc18c70e31b38a10d1346134578", "content_id": "57067a9767a698f5a9b252d0b79391a92fab4234", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3383, "license_type": "permissive", "max_line_length": 77, "num_lines": 129, "path": "/bids-validator-web/components/results/Results.jsx", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "// dependencies -----------------------------------------------------------\n\nimport React from 'react'\nimport { Card, Collapse, Alert } from 'react-bootstrap'\nimport PropTypes from 'prop-types'\nimport pluralize from 'pluralize'\nimport Issues from './Issues'\n\n// component setup --------------------------------------------------------\n\nexport default class ValidationResults extends React.Component {\n constructor() {\n super()\n this.state = {\n warningsOpen: false,\n errorsOpen: false,\n }\n }\n // life cycle events ------------------------------------------------------\n\n render() {\n let errors = this.props.errors\n let warnings = this.props.warnings\n\n // errors\n let errorsWrap\n if (errors.length > 0) {\n let fileCount = this._countFiles(errors)\n let errorHeader = (\n <span>\n view {errors.length} {pluralize('error', errors.length)}{' '}\n {this._countText(fileCount)}\n </span>\n )\n errorsWrap = (\n <Card className=\"fadeIn upload-panel error-wrap my-3\" key=\"1\">\n <Alert\n onClick={() =>\n this.setState({ errorsOpen: 
!this.state.errorsOpen })\n }\n aria-controls=\"errors-issues\"\n aria-expanded={this.state.errorsOpen}\n variant=\"danger\"\n className=\"mb-0\">\n {errorHeader}\n </Alert>\n <Collapse in={this.state.errorsOpen}>\n <Card.Body id=\"errors-issues\">\n <Issues issues={errors} issueType=\"Error\" />\n </Card.Body>\n </Collapse>\n </Card>\n )\n }\n\n //warnings\n let warningWrap\n if (warnings.length > 0) {\n let fileCount = this._countFiles(warnings)\n let warningHeader = (\n <span>\n view {warnings.length} {pluralize('warning', warnings.length)}{' '}\n {this._countText(fileCount)}\n </span>\n )\n warningWrap = (\n <Card className=\"fadeIn upload-panel warning-wrap my-3\" key=\"2\">\n <Alert\n onClick={() =>\n this.setState({ warningsOpen: !this.state.warningsOpen })\n }\n aria-controls=\"warning-issues\"\n aria-expanded={this.state.warningsOpen}\n variant=\"warning\"\n className=\"mb-0\">\n {warningHeader}\n </Alert>\n <Collapse in={this.state.warningsOpen}>\n <Card.Body id=\"warning-issues\">\n <Issues issues={warnings} issueType=\"Warning\" />\n </Card.Body>\n </Collapse>\n </Card>\n )\n }\n\n // validations errors and warning wraps\n return (\n // <Menu className=\"validation-messages\" accordion>\n <div>\n {errorsWrap}\n {warningWrap}\n </div>\n // </Menu>\n )\n }\n\n // custom methods ---------------------------------------------------------\n\n _countFiles(issues) {\n let numFiles = 0\n for (let issue of issues) {\n if (issue.files.length > 1 || !!issue.files[0].file) {\n numFiles += issue.files.length\n }\n }\n return numFiles\n }\n\n _countText(count) {\n if (count > 0) {\n return (\n <span>\n in {count} {pluralize('files', count)}\n </span>\n )\n }\n }\n}\n\nValidationResults.propTypes = {\n errors: PropTypes.array,\n warnings: PropTypes.array,\n}\n\nValidationResults.Props = {\n errors: [],\n warnings: [],\n}\n" }, { "alpha_fraction": 0.6443905830383301, "alphanum_fraction": 0.6513158082962036, "avg_line_length": 25.01801872253418, "blob_id": "b7d58ff0b5c3df23b5e1362e6f80e857c9867f7d", "content_id": "d800e57e179d38aff4978d704863e626cbbf8149", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2888, "license_type": "permissive", "max_line_length": 134, "num_lines": 111, "path": "/bids-validator/validators/bids/subSesMismatchTest.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\n\n/**\n * subid and sesid mismatch test. 
Generates error if ses-id and sub-id are different for any file, Takes a file list and return issues\n */\nconst subSesMismatchTest = (fileList) => {\n const issues = []\n\n // validates if sub/ses-id in filename matches with ses/sub directory file is saved\n const fileKeys = Object.keys(fileList)\n fileKeys.forEach((key) => {\n let file = fileList[key]\n if (utils.type.file.isStimuliData(file.relativePath)) {\n return\n }\n const values = getPathandFileValues(file.relativePath)\n\n const pathValues = values[0]\n const fileValues = values[1]\n\n if (fileValues.sub !== null || fileValues.ses !== null) {\n const subMismatch = fileValues.sub !== pathValues.sub\n const sesMismatch = fileValues.ses !== pathValues.ses\n\n if (subMismatch) {\n issues.push(mismatchError('subject', file))\n }\n\n if (sesMismatch) {\n issues.push(mismatchError('session', file))\n }\n }\n })\n return issues\n}\n\n/**\n * getPathandFileValues\n * Takes a file path and returns values\n * found related to subject and session keys for both path and file keys.\n *\n * @param {string} path the string to extract subject and session level values\n */\nconst getPathandFileValues = (path) => {\n const values = {}\n const file_name = {}\n\n // capture subject\n values.sub = captureFromPath(path, /^\\/sub-([a-zA-Z0-9]+)/)\n\n // capture session\n values.ses = captureFromPath(path, /^\\/sub-[a-zA-Z0-9]+\\/ses-([a-zA-Z0-9]+)/)\n\n //capture session and subject id from filename to find if files are in\n // correct sub/ses directory\n const filename = path.replace(/^.*[\\\\/]/, '')\n\n // capture sub from file name\n file_name.sub = captureFromPath(filename, /^sub-([a-zA-Z0-9]+)/)\n\n // capture session from file name\n file_name.ses = captureFromPath(\n filename,\n /^sub-[a-zA-Z0-9]+_ses-([a-zA-Z0-9]+)/,\n )\n\n return [values, file_name]\n}\n\n/**\n * CaptureFromPath\n *\n * takes a file path and a regex and\n * returns the matched value or null\n *\n * @param {string} path path to test regex against\n * @param {regex} regex regex pattern we wish to test\n */\nconst captureFromPath = (path, regex) => {\n const match = regex.exec(path)\n return match && match[1] ? 
match[1] : null\n}\n\n/**\n *\n * Mismatch Error\n *\n * generates the Issue object for session / subject\n * mismatch\n *\n * @param {string} type error type - session or subject\n * @param {object} file file responsible for the error\n */\nconst mismatchError = (type, file) => {\n let code, abbrv\n if (type == 'session') {\n code = 65\n abbrv = 'ses'\n } else {\n code = 64\n abbrv = 'sub'\n }\n return new Issue({\n code: code,\n evidence: `File: ${file.relativePath} is saved in incorrect ${type} directory as per ${abbrv}-id in filename.`,\n file: file,\n })\n}\n\nexport default subSesMismatchTest\n" }, { "alpha_fraction": 0.4087095558643341, "alphanum_fraction": 0.4155013859272003, "avg_line_length": 30.287500381469727, "blob_id": "53f4754c320f86162a11764b6945ed16e694c6b6", "content_id": "46b1408bbee03aa27f9ae1c65126173014d212b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2503, "license_type": "permissive", "max_line_length": 113, "num_lines": 80, "path": "/bids-validator/validators/microscopy/validate.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\nconst Issue = utils.issues.Issue\nimport ometiff from './ometiff'\nimport validateTiffSignature from './validateTiffSignature'\n\nconst TIFF_ID = 0x2a\nconst BIG_TIFF_ID = 0x2b\n\nconst validate = (files, jsonContentsDict) => {\n let issues = []\n // validate ometiff\n const omePromises = files.map(function (file) {\n return utils.limit(\n () =>\n new Promise((resolve, reject) => {\n utils.files.readBuffer(file).then((buffer) => {\n if (validateTiffSignature(buffer, TIFF_ID)) {\n if (file.relativePath.endsWith('.ome.btf')) {\n issues.push(\n new Issue({\n code: 227,\n file: file,\n evidence: `Inconsistent TIFF file type and extension, given .ome.btf but should be .ome.tif`,\n }),\n )\n }\n utils.files\n .readOMEFile(buffer)\n .then((omeData) => {\n ometiff(\n file,\n omeData,\n jsonContentsDict,\n function (omeIssues) {\n issues = issues.concat(omeIssues)\n resolve()\n },\n )\n })\n .catch((err) =>\n utils.issues.redirect(err, reject, () => {\n issues.push(err)\n resolve()\n }),\n )\n } else if (validateTiffSignature(buffer, BIG_TIFF_ID)) {\n if (file.relativePath.endsWith('.ome.tif')) {\n issues.push(\n new Issue({\n code: 227,\n file: file,\n evidence: `Inconsistent TIFF file type and extension, given .ome.tif but should be .ome.btf`,\n }),\n )\n }\n issues.push(\n new Issue({\n code: 226,\n file: file,\n }),\n )\n resolve()\n } else {\n issues.push(\n new Issue({\n code: 227,\n file: file,\n evidence: `3rd byte of file does not identify file as tiff.`,\n }),\n )\n resolve()\n }\n })\n }),\n )\n })\n return Promise.all(omePromises).then(() => issues)\n}\n\nexport default validate\n" }, { "alpha_fraction": 0.5488126873970032, "alphanum_fraction": 0.6427440643310547, "avg_line_length": 33.87730026245117, "blob_id": "aff3c33a089961f7e4121fec508d47fb15ad6a31", "content_id": "c4dccaa4746f53a16c2bca9248d13428ef6efddb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5686, "license_type": "permissive", "max_line_length": 343, "num_lines": 163, "path": "/bids-validator/tests/data/pet001_jsonContentsDict.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export default {\n '/dataset_description.json': {\n BIDSVersion: '1.5.0',\n License: 'CCO license',\n Name: '[11C]CIMBI36 PET dataset of a pig',\n Authors: 
[\n 'Melanie Ganz-Benjaminsen',\n 'Martin Noergaard',\n 'Hanne Demant Hansen',\n ],\n Acknowledgements:\n 'Knudsen GM, Jensen PS, Erritzoe D, Baaré WFC, Ettrup A, Fisher PM, Gillings N, Hansen HD, Hansen LK, Hasselbalch SG, Henningsson S, Herth MM, Holst KK, Iversen P, Kessing LV, Macoveanu J, Madsen KS, Mortensen EL, Nielsen FÅ, Paulson OB, Siebner HR, Stenbæk DS, Svarer C, Jernigan TL, Strother SC, Frokjaer VG. The Center for Integrated Molecular Brain Imaging (Cimbi) Database. NeuroImage. 2016 Jan 1;124(Pt B):1213-1219',\n HowToAcknowledge: 'This data was obtained from the Cimbi database.',\n Funding: [\n 'Lundbeck Foundation R90-A7722',\n 'Danish Research Council 09-063598',\n 'Rigshospitalet',\n ],\n DatasetDOI: '',\n },\n '/participants.json': {\n participant_id: {\n LongName: 'Participant Id',\n Description: 'label identifying a particular subject',\n },\n weight: { LongName: 'Weight' },\n },\n '/sub-01/ses-01/anat/sub-01_ses-01_T1w.json': {\n Modality: 'MR',\n MagneticFieldStrength: 3,\n Manufacturer: 'Siemens',\n ManufacturersModelName: 'Trio',\n InstitutionName: 'DRCMR_HVIDOVRE',\n InstitutionAddress: '30Kettegaard_Alle_Copenhagen_Hvidovre_DK-2650_DK',\n DeviceSerialNumber: '20511',\n StationName: 'MRC20511',\n ProcedureStepDescription: 'Hjerne_projekter_Cimbi_SAD',\n SoftwareVersions: 'syngo_MR_A30_4VA30A',\n SeriesDescription: 't1_mpr_ns_sag_1mm_noring',\n ProtocolName: 't1_mpr_ns_sag_1mm_noring',\n ScanningSequence: 'IR_GR',\n SequenceVariant: 'SP_MP',\n ScanOptions: 'IR',\n SequenceName: '_tfl3d1_ns',\n ImageType: ['ORIGINAL', 'PRIMARY', 'M', 'ND'],\n AcquisitionTime: '10:24:59.860010',\n AcquisitionNumber: 1,\n ImageComments: '50524',\n EchoTime: 0.00304,\n RepetitionTime: 1.55,\n InversionTime: 0.8,\n FlipAngle: 9,\n PartialFourier: 1,\n BaseResolution: 256,\n PhaseResolution: 1,\n ReceiveCoilName: '8_Channel_Head',\n PulseSequenceDetails: '%SiemensSeq%_tfl',\n PercentPhaseFOV: 100,\n PhaseEncodingSteps: 256,\n AcquisitionMatrixPE: 256,\n ReconMatrixPE: 256,\n ConversionSoftware: 'dcm2niix',\n ConversionSoftwareVersion: 'v1.0.20170923 (OpenJPEG build) GCC4.8.4',\n },\n '/sub-01/ses-01/pet/sub-01_ses-01_pet.json': {\n Modality: 'PET',\n Manufacturer: 'Siemens',\n ManufacturersModelName:\n 'High-Resolution Research Tomograph (HRRT, CTI/Siemens)',\n BodyPart: 'Brain',\n Units: 'Bq/ml',\n TracerName: 'CIMBI-36',\n TracerRadionuclide: 'C11',\n TracerMolecularWeight: 380.28,\n TracerMolecularWeightUnits: 'g/mol',\n InjectedRadioactivity: 573,\n InjectedRadioactivityUnits: 'MBq',\n InjectedMass: 0.62,\n InjectedMassUnits: 'ug',\n SpecificRadioactivity: 353.51,\n SpecificRadioactivityUnits: 'GBq/ug',\n ModeOfAdministration: 'bolus',\n MolarActivity: 1.62,\n MolarActivityUnits: 'nmol',\n MolarActivityMeasTime: '12:59:00',\n TimeZero: '13:04:42',\n ScanStart: 0,\n InjectionStart: 0,\n FrameTimesStart: [\n 0, 10, 20, 30, 40, 50, 60, 80, 100, 120, 140, 160, 180, 240, 300, 360,\n 420, 480, 540, 660, 780, 900, 1020, 1140, 1260, 1380, 1500, 1800, 2100,\n 2400, 2700, 3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400, 5700,\n 6000, 6300, 6600, 6900,\n ],\n FrameDuration: [\n 10, 20, 30, 40, 50, 60, 80, 100, 120, 140, 160, 180, 240, 300, 360, 420,\n 480, 540, 660, 780, 900, 1020, 1140, 1260, 1380, 1500, 1800, 2100, 2400,\n 2700, 3000, 3300, 3600, 3900, 4200, 4500, 4800, 5100, 5400, 5700, 6000,\n 6300, 6600, 6900, 7200,\n ],\n AcquisitionMode: 'list mode',\n ImageDecayCorrected: true,\n ImageDecayCorrectionTime: 0,\n ReconMatrixSize: [256, 256, 207, 45],\n ImageVoxelSize: [1.2188, 1.2188, 
1.2188],\n ReconMethodName: '3D-OSEM-PSF',\n ReconMethodParameterLabels: ['subsets', 'iterations'],\n ReconMethodParameterUnits: ['none', 'none'],\n ReconMethodParameterValues: [16, 10],\n ReconFilterType: 'none',\n ReconFilterSize: 0,\n AttenuationCorrection: '[137Cs]transmission scan-based',\n },\n '/sub-01/ses-01/pet/sub-01_ses-01_recording-autosampler_blood.json': {\n PlasmaAvail: false,\n WholeBloodAvail: true,\n MetaboliteAvail: false,\n DispersionCorrected: false,\n time: {\n Description: 'Time in relation to time zero defined by the _pet.json',\n Units: 's',\n },\n whole_blood_radioactivity: {\n Description:\n 'Radioactivity in uncorrected whole blood samples from Allogg autosampler.',\n Units: 'kBq/ml',\n },\n },\n '/sub-01/ses-01/pet/sub-01_ses-01_recording-manual_blood.json': {\n PlasmaAvail: true,\n WholeBloodAvail: true,\n MetaboliteAvail: true,\n MetaboliteMethod: 'HPLC',\n MetaboliteRecoveryCorrectionApplied: false,\n DispersionCorrected: false,\n time: {\n Description: 'Time in relation to time zero defined by the _pet.json',\n Units: 's',\n },\n plasma_radioactivity: {\n Description:\n 'Radioactivity in plasma samples. Measured using COBRA counter.',\n Units: 'kBq/ml',\n },\n whole_blood_radioactivity: {\n Description:\n 'Radioactivity in whole blood samples. Measured using COBRA counter.',\n Units: 'kBq/ml',\n },\n metabolite_parent_fraction: {\n Description: 'Parent fraction of the radiotracer.',\n Units: 'unitless',\n },\n metabolite_polar_fraction: {\n Description: 'Polar metabolite fraction of the radiotracer.',\n Units: 'unitless',\n },\n metabolite_lipophilic_fraction: {\n Description: 'Lipophilic metabolite fraction of the radiotracer.',\n Units: 'unitless',\n },\n },\n}\n" }, { "alpha_fraction": 0.6850363612174988, "alphanum_fraction": 0.6873325705528259, "avg_line_length": 41.83606719970703, "blob_id": "7817353366ca3f7c71da70731764ed57d9d763bf", "content_id": "1b37aa4efa0832a65800d6acf20b301fa315a8a9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2613, "license_type": "permissive", "max_line_length": 94, "num_lines": 61, "path": "/bids-validator/src/files/deno.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals, assertRejects } from '../deps/asserts.ts'\nimport { readAll, readerFromStreamReader } from '../deps/stream.ts'\nimport { dirname, basename, join } from '../deps/path.ts'\nimport { BIDSFileDeno, UnicodeDecodeError } from './deno.ts'\nimport { requestReadPermission } from '../setup/requestPermissions.ts'\nimport { FileIgnoreRules } from './ignore.ts'\n\nawait requestReadPermission()\n\n// Use this file for testing file behavior\nconst testUrl = import.meta.url\nconst testPath = testUrl.slice('file://'.length)\nconst testDir = dirname(testPath)\nconst testFilename = basename(testPath)\nconst ignore = new FileIgnoreRules([])\n\nDeno.test('Deno implementation of BIDSFile', async (t) => {\n await t.step('implements basic file properties', () => {\n const file = new BIDSFileDeno(testDir, testFilename, ignore)\n assertEquals(join(testDir, file.path), testPath)\n })\n await t.step('implements correct file size', async () => {\n const { size } = await Deno.stat(testPath)\n const file = new BIDSFileDeno(testDir, testFilename, ignore)\n assertEquals(await file.size, size)\n })\n await t.step('can be read as ReadableStream', async () => {\n const file = new BIDSFileDeno(testDir, testFilename, ignore)\n const stream = file.stream\n const 
streamReader = stream.getReader()\n const denoReader = readerFromStreamReader(streamReader)\n const fileBuffer = await readAll(denoReader)\n assertEquals(await file.size, fileBuffer.length)\n })\n await t.step('can be read with .text() method', async () => {\n const file = new BIDSFileDeno(testDir, testFilename, ignore)\n const text = await file.text()\n assertEquals(await file.size, text.length)\n })\n await t.step(\n 'throws UnicodeDecodeError when reading a UTF-16 file with text() method',\n async () => {\n // BOM is invalid in JSON but shows up often from certain tools, so abstract handling it\n const bomDir = join(testPath, '..', '..', 'tests')\n const bomFilename = 'bom-utf16.tsv'\n const file = new BIDSFileDeno(bomDir, bomFilename, ignore)\n await assertRejects(async () => file.text(), UnicodeDecodeError)\n },\n )\n await t.step(\n 'strips BOM characters when reading UTF-8 via .text()',\n async () => {\n // BOM is invalid in JSON but shows up often from certain tools, so abstract handling it\n const bomDir = join(testPath, '..', '..', 'tests')\n const bomFilename = 'bom-utf8.json'\n const file = new BIDSFileDeno(bomDir, bomFilename, ignore)\n const text = await file.text()\n assertEquals(text, '{\\n \"example\": \"JSON for test suite\"\\n}\\n')\n },\n )\n})\n" }, { "alpha_fraction": 0.610230565071106, "alphanum_fraction": 0.610230565071106, "avg_line_length": 25.69230842590332, "blob_id": "084355542bffc7b5783634ac7f99a73b83aabe54", "content_id": "9c0c0e7203940e7602bc8a31f007efee0edbe9e4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1388, "license_type": "permissive", "max_line_length": 68, "num_lines": 52, "path": "/bids-validator/validators/json/load.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import utils from '../../utils'\n\nclass JSONParseError extends Error {\n constructor(message) {\n super(message)\n this.name = 'JSONParseError'\n }\n}\n\nconst load = (files, jsonFiles, jsonContentsDict, annexed, dir) => {\n let issues = []\n\n // Read JSON file contents and parse for issues\n const readJsonFile = (file, annexed, dir) =>\n utils.files\n .readFile(file, annexed, dir)\n .then((contents) => utils.json.parse(file, contents))\n .then(({ issues: parseIssues, parsed }) => {\n // Append any parse issues to returned issues\n Array.prototype.push.apply(issues, parseIssues)\n\n // Abort further tests if an error is found\n if (\n parseIssues &&\n parseIssues.some((issue) => issue.severity === 'error')\n ) {\n throw new JSONParseError('Aborted due to parse error')\n }\n\n jsonContentsDict[file.relativePath] = parsed\n jsonFiles.push(file)\n })\n\n // Start concurrent read/parses\n const fileReads = files.map((file) =>\n utils.limit(() => readJsonFile(file, annexed, dir)),\n )\n\n // After all reads/parses complete, return any found issues\n return Promise.all(fileReads)\n .then(() => issues)\n .catch((err) => {\n // Handle early exit\n if (err instanceof JSONParseError) {\n return issues\n } else {\n throw err\n }\n })\n}\n\nexport default load\n" }, { "alpha_fraction": 0.5628742575645447, "alphanum_fraction": 0.5628742575645447, "avg_line_length": 17.55555534362793, "blob_id": "de092309bf65bc979d0b9f7b60092569072e84ff", "content_id": "c1083012d1dc168ed10e775c0ad416648fd74ae1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 167, "license_type": "permissive", "max_line_length": 43, "num_lines": 9, "path": 
"/bids-validator/utils/files/sessions.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "export default {\n sessionMatcher: new RegExp('(ses-.*?)/'),\n\n Subject: function () {\n this.files = []\n this.sessions = []\n this.missingSessions = []\n },\n}\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 17.66666603088379, "blob_id": "e7a6d87e60bbe3fa96de241a81e9c30b0bd32d15", "content_id": "a92deb1debab6ed6a8b41f4b462d9e28ea6f81a0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 112, "license_type": "permissive", "max_line_length": 51, "num_lines": 6, "path": "/bids-validator/tests/bids-web.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "/**\n * eslint no-console: [\"error\", { allow: [\"log\"] }]\n * @jest-environment jsdom\n */\n\nimport './bids.spec.js'\n" }, { "alpha_fraction": 0.6123988628387451, "alphanum_fraction": 0.6339423656463623, "avg_line_length": 27.353092193603516, "blob_id": "f018ef9b61f6107513b6e978aa83ee6590bbd90f", "content_id": "902114204942a6daf9440ce913cf613f21782a1d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 11001, "license_type": "permissive", "max_line_length": 197, "num_lines": 388, "path": "/bids-validator/utils/files/readDir.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import ignore from 'ignore'\nimport readFile from './readFile'\nimport path from 'path'\nimport fs from 'fs'\nimport * as child_proccess from 'child_process'\nimport isNode from '../isNode'\n\n/**\n * Read Directory\n *\n * In node it takes a path to a directory and returns\n * an array containing all of the files to a callback.\n * Used to input and organize files in node, in a\n * similar structure to how chrome reads a directory.\n * In the browser it simply passes the file dir\n * object to the callback.\n * @param {String} dir Path to read\n * @param {Object} options\n * @param {boolean} options.ignoreSymlinks enable to prevent recursively following directory symlinks\n * @returns {Promise<Object>}\n */\nasync function readDir(dir, options = {}) {\n const ig = await getBIDSIgnore(dir)\n const fileArray = isNode\n ? await preprocessNode(path.resolve(dir), ig, options)\n : preprocessBrowser(dir, ig)\n const files = fileArrayToObject(fileArray)\n return files\n}\n\n/**\n * Transform array of file-like objects to one object with each file as a property\n * @param {Array[Object]} fileArray\n * @returns {Object}\n */\nfunction fileArrayToObject(fileArray) {\n const filesObj = {}\n // converting array to object\n for (let j = 0; j < fileArray.length; j++) {\n filesObj[j] = fileArray[j]\n }\n return filesObj\n}\n\n/**\n * Preprocess file objects from a browser\n *\n * 1. Filters out ignored files and folder.\n * 2. 
Adds 'relativePath' field of each file object.\n */\nfunction preprocessBrowser(filesObj, ig) {\n const filesList = []\n for (let i = 0; i < filesObj.length; i++) {\n const fileObj = filesObj[i]\n fileObj.relativePath = harmonizeRelativePath(fileObj.webkitRelativePath)\n if (ig.ignores(path.relative('/', fileObj.relativePath))) {\n fileObj.ignore = true\n }\n filesList.push(fileObj)\n }\n return filesList\n}\n\n/**\n * Harmonize Relative Path\n *\n * Takes a file and returns the browser style relative path\n * base on the environment.\n *\n * Since this may be called in the browser, do not call Node.js modules\n *\n * @param {String} path Relative path to normalize\n * @returns {String}\n */\nfunction harmonizeRelativePath(path) {\n // This hack uniforms relative paths for command line calls to 'BIDS-examples/ds001/' and 'BIDS-examples/ds001'\n if (path.indexOf('\\\\') !== -1) {\n // This is likely a Windows path - Node.js\n const pathParts = path.split('\\\\')\n return '/' + pathParts.slice(1).join('/')\n } else if (path[0] !== '/') {\n // Bad POSIX path - Node.js\n const pathParts = path.split('/')\n return '/' + pathParts.slice(1).join('/')\n } else {\n // Already correct POSIX path - Browsers (all platforms)\n return path\n }\n}\n\n/**\n * Preprocess directory path from a Node CLI\n *\n * 1. Recursively travers the directory tree\n * 2. Filters out ignored files and folder.\n * 3. Harmonizes the 'relativePath' field\n */\nasync function preprocessNode(dir, ig, options) {\n const str = dir.substr(dir.lastIndexOf(path.sep) + 1) + '$'\n const rootpath = dir.replace(new RegExp(str), '')\n if (options.gitTreeMode) {\n // if in gitTreeMode, attempt to get files from git-annex metadata\n // before using fs\n const files = await getFilesFromGitTree(dir, ig, options)\n if (files !== null) return files\n }\n return await getFilesFromFs(dir, rootpath, ig, options)\n}\n\n/**\n * runs command `git ls-tree -l -r <git-ref>` in given directory\n * @param {string} cwd path to dataset directory\n * @param {string} gitRef git ref (commit hash, ref, 'HEAD', etc)\n * @returns {string[]}\n */\nconst getGitLsTree = (cwd, gitRef) =>\n new Promise((resolve) => {\n let output = ''\n const gitProcess = child_proccess.spawn(\n 'git',\n ['ls-tree', '-l', '-r', gitRef],\n {\n cwd,\n encoding: 'utf-8',\n },\n )\n gitProcess.stdout.on('data', (data) => {\n output += data.toString()\n })\n gitProcess.stderr.on('data', () => {\n resolve(null)\n })\n gitProcess.on('close', () => {\n resolve(output.trim().split('\\n'))\n })\n })\n\nconst readLsTreeLines = (gitTreeLines) =>\n gitTreeLines\n .map((line) => {\n const [metadata, path] = line.split('\\t')\n const [mode, objType, objHash, size] = metadata.split(/\\s+/)\n return { path, mode, objType, objHash, size }\n })\n .filter(\n ({ path, mode }) =>\n // skip git / datalad files and submodules\n !/^\\.git/.test(path) &&\n !/^\\.datalad/.test(path) &&\n '.gitattributes' !== path &&\n mode !== '160000',\n )\n .reduce(\n (\n // accumulator\n { files, symlinkFilenames, symlinkObjects },\n // git-tree line\n { path, mode, objHash, size },\n ) => {\n // read ls-tree line\n if (mode === '120000') {\n symlinkFilenames.push(path)\n symlinkObjects.push(objHash)\n } else {\n files.push({\n path,\n size: parseInt(size),\n })\n }\n return { files, symlinkFilenames, symlinkObjects }\n },\n { files: [], symlinkFilenames: [], symlinkObjects: [] },\n )\n\n/**\n * runs `git cat-file --batch --buffer` in given directory\n * @param {string} cwd\n * @param {string} input\n * @returns 
{string[]}\n */\nconst getGitCatFile = (cwd, input) =>\n new Promise((resolve) => {\n let output = ''\n const gitProcess = child_proccess.spawn(\n 'git',\n ['cat-file', '--batch', '--buffer'],\n {\n cwd,\n encoding: 'utf-8',\n },\n )\n\n // pass in symlink objects\n gitProcess.stdin.write(input)\n gitProcess.stdin.end()\n\n gitProcess.stdout.on('data', (data) => {\n output += data.toString()\n })\n gitProcess.stderr.on('data', () => {\n resolve(null)\n })\n gitProcess.on('close', () => {\n resolve(output.trim().split('\\n'))\n })\n })\n\nconst readCatFileLines = (gitCatFileLines, symlinkFilenames) =>\n gitCatFileLines\n // even lines contain unneeded metadata\n .filter((_, i) => i % 2 === 1)\n .map((line, i) => {\n const path = symlinkFilenames[i]\n const key = line.split('/').pop()\n const size = parseInt(key.match(/-s(\\d+)/)[1])\n return {\n path,\n size,\n }\n })\n\nconst processFiles = (dir, ig, ...fileLists) =>\n fileLists\n .reduce((allFiles, files) => [...allFiles, ...files], [])\n .map((file) => {\n file.relativePath = path.normalize(`${path.sep}${file.path}`)\n return file\n })\n .filter((file) => {\n const ignore = ig.ignores(file.relativePath.slice(1))\n return !ignore\n })\n .map((file) => {\n file.relativePath = harmonizeRelativePath(file.relativePath)\n file.name = path.basename(file.path)\n file.path = path.join(dir, file.relativePath)\n return file\n })\n\nasync function getFilesFromGitTree(dir, ig, options) {\n const gitTreeLines = await getGitLsTree(dir, options.gitRef)\n if (\n gitTreeLines === null ||\n (gitTreeLines.length === 1 && gitTreeLines[0] === '')\n )\n return null\n const { files, symlinkFilenames, symlinkObjects } =\n readLsTreeLines(gitTreeLines)\n\n const gitCatFileLines = await getGitCatFile(dir, symlinkObjects.join('\\n'))\n // example gitCatFile output:\n // .git/annex/objects/Mv/99/SHA256E-s54--42c98d14dbe3d066d35897a61154e39ced478cd1f0ec6159ba5f2361c4919878.json/SHA256E-s54--42c98d14dbe3d066d35897a61154e39ced478cd1f0ec6159ba5f2361c4919878.json\n // .git/annex/objects/QV/mW/SHA256E-s99--bbef536348750373727d3b5856398d7377e5d7e23875eed026b83d12cee6f885.json/SHA256E-s99--bbef536348750373727d3b5856398d7377e5d7e23875eed026b83d12cee6f885.json\n const symlinkFiles = readCatFileLines(gitCatFileLines, symlinkFilenames)\n\n return processFiles(dir, ig, files, symlinkFiles)\n}\n\n/**\n * Recursive helper function for 'preprocessNode'\n */\nasync function getFilesFromFs(dir, rootPath, ig, options, parent = []) {\n const files = await fs.promises.readdir(dir, { withFileTypes: true })\n const filesAccumulator = parent\n for (const file of files) {\n const fullPath = path.join(dir, file.name)\n const relativePath = harmonizeRelativePath(\n path.relative(rootPath, fullPath),\n )\n const ignore = ig.ignores(path.relative('/', relativePath))\n const fileObj = {\n name: file.name,\n path: fullPath,\n relativePath,\n }\n if (ignore) {\n fileObj.ignore = true\n }\n // Three cases to consider: directories, files, symlinks\n if (file.isDirectory()) {\n await getFilesFromFs(fullPath, rootPath, ig, options, filesAccumulator)\n } else if (file.isSymbolicLink()) {\n // Allow skipping symbolic links which lead to recursion\n // Disabling this is a big performance advantage on high latency\n // storage but it's a good default for versatility\n if (!options.ignoreSymlinks) {\n try {\n const targetPath = await fs.promises.realpath(fullPath)\n const targetStat = await fs.promises.stat(targetPath)\n // Either add or recurse from the target depending\n if (targetStat.isDirectory()) {\n 
await getFilesFromFs(\n targetPath,\n rootPath,\n ig,\n options,\n filesAccumulator,\n )\n } else {\n filesAccumulator.push(fileObj)\n }\n } catch (err) {\n // Symlink points at an invalid target, skip it\n return\n }\n } else {\n // This branch assumes all symbolic links are not directories\n filesAccumulator.push(fileObj)\n }\n } else {\n filesAccumulator.push(fileObj)\n }\n }\n return filesAccumulator\n}\n\nexport function defaultIgnore() {\n return ignore()\n .add('.*')\n .add('!*.icloud')\n .add('/derivatives')\n .add('/sourcedata')\n .add('/code')\n}\n\nasync function getBIDSIgnore(dir) {\n const ig = defaultIgnore()\n\n const bidsIgnoreFileObj = getBIDSIgnoreFileObj(dir)\n if (bidsIgnoreFileObj) {\n const content = await readFile(bidsIgnoreFileObj)\n ig.add(content)\n }\n return ig\n}\n\n/**\n * Get File object corresponding to the .bidsignore file\n * @param dir\n * @returns File object or null if not found\n */\nfunction getBIDSIgnoreFileObj(dir) {\n if (isNode) {\n return getBIDSIgnoreFileObjNode(dir)\n } else {\n return getBIDSIgnoreFileObjBrowser(dir)\n }\n}\n\nfunction getBIDSIgnoreFileObjNode(dir) {\n const path = dir + '/.bidsignore'\n try {\n fs.accessSync(path)\n return { path: path, stats: { size: null } }\n } catch (err) {\n return null\n }\n}\n\nfunction getBIDSIgnoreFileObjBrowser(dir) {\n for (var i = 0; i < dir.length; i++) {\n const fileObj = dir[i]\n const relativePath = harmonizeRelativePath(fileObj.webkitRelativePath)\n if (relativePath === '/.bidsignore') {\n return fileObj\n }\n }\n}\n\nexport {\n readDir,\n getFilesFromFs,\n fileArrayToObject,\n harmonizeRelativePath,\n readLsTreeLines,\n readCatFileLines,\n processFiles,\n}\n\nexport default Object.assign(readDir, {\n readDir,\n getFilesFromFs,\n fileArrayToObject,\n harmonizeRelativePath,\n readLsTreeLines,\n readCatFileLines,\n processFiles,\n})\n" }, { "alpha_fraction": 0.6505576372146606, "alphanum_fraction": 0.656753420829773, "avg_line_length": 24.21875, "blob_id": "22d77f4233ca0a74e3f032e3bf17225e8ca32f61", "content_id": "26ec275566d39714207ebc2f0bfc68005638f3ac", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 807, "license_type": "permissive", "max_line_length": 63, "num_lines": 32, "path": "/bids-validator/src/schema/entities.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { memoize } from '../utils/memoize.ts'\n\nexport interface BIDSEntities {\n suffix: string\n extension: string\n entities: Record<string, string>\n}\n\nexport function _readEntities(filename: string): BIDSEntities {\n let suffix = ''\n let extension = ''\n const entities: Record<string, string> = {}\n\n const parts = filename.split('_')\n for (let i = 0; i < parts.length - 1; i++) {\n const [entity, label] = parts[i].split('-')\n entities[entity] = label || 'NOENTITY'\n }\n\n const lastPart = parts[parts.length - 1]\n const extStart = lastPart.indexOf('.')\n if (extStart === -1) {\n suffix = lastPart\n } else {\n suffix = lastPart.slice(0, extStart)\n extension = lastPart.slice(extStart)\n }\n\n return { suffix, extension, entities }\n}\n\nexport const readEntities = memoize(_readEntities)\n" }, { "alpha_fraction": 0.603960394859314, "alphanum_fraction": 0.603960394859314, "avg_line_length": 27.85714340209961, "blob_id": "bd65aa9e2bcec599d3b7349ff94dd00f355596ec", "content_id": "7e8f8023a5d68387fcaa8c287f04296f21859413", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", 
"length_bytes": 404, "license_type": "permissive", "max_line_length": 64, "num_lines": 14, "path": "/bids-validator/src/setup/options.test.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assertEquals } from '../deps/asserts.ts'\nimport { parseOptions } from './options.ts'\n\nDeno.test('options parsing', async (t) => {\n await t.step('verify basic arguments work', async () => {\n const options = await parseOptions(['my_dataset', '--json'])\n assertEquals(options, {\n datasetPath: 'my_dataset',\n debug: 'ERROR',\n json: true,\n schema: 'latest',\n })\n })\n})\n" }, { "alpha_fraction": 0.6355382800102234, "alphanum_fraction": 0.6394293308258057, "avg_line_length": 20.41666603088379, "blob_id": "430571bd590fcb120f12a8871e6b1076685548c0", "content_id": "550eff0bf4ec2f6f5c0b5669d7934a6cad4bcbc1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 771, "license_type": "permissive", "max_line_length": 58, "num_lines": 36, "path": "/bids-validator/validators/bids/quickTestError.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import path from 'path'\nimport utils from '../../utils'\nconst Issue = utils.issues.Issue\nimport isNode from '../../utils/isNode'\n\n/*\n * Generates an error for quickTest failures\n */\nconst quickTestError = function (dir) {\n let filename\n if (isNode) {\n // For Node, grab the path from the dir string\n filename = path.basename(dir)\n } else {\n filename = constructFileName(dir)\n }\n const issue = new Issue({\n code: 61,\n file: {\n name: filename,\n path: path.join('.', filename),\n relativePath: path.join('', filename),\n },\n })\n return issue\n}\n\nconst constructFileName = (dir) => {\n try {\n return dir[0].webkitRelativePath.split(path.sep).pop()\n } catch (err) {\n return 'uploaded-directory'\n }\n}\n\nexport default quickTestError\n" }, { "alpha_fraction": 0.6024242639541626, "alphanum_fraction": 0.614545464515686, "avg_line_length": 32, "blob_id": "b1d9b44f2dab44797bc98a1b6ddace7d6847e2f1", "content_id": "4ef4a25297990b82acc377e1fbd127936ffcc277", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 825, "license_type": "permissive", "max_line_length": 74, "num_lines": 25, "path": "/bids-validator/utils/summary/__tests__/collectPetFields.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import collectPetFields, { orderedList } from '../collectPetFields.js'\nimport jsonContentsDict from '../../../tests/data/pet001_jsonContentsDict'\n\ndescribe('collectPetFields()', () => {\n it('extracts an ordered list of pet specific fields', () => {\n expect(collectPetFields(jsonContentsDict)).toEqual({\n BodyPart: ['Brain'],\n ScannerManufacturer: ['Siemens'],\n ScannerManufacturersModelName: [\n 'High-Resolution Research Tomograph (HRRT, CTI/Siemens)',\n ],\n TracerName: ['CIMBI-36'],\n TracerRadionuclide: ['C11'],\n })\n })\n})\n\ndescribe('orderedList()', () => {\n it('reduces a tally object to an ordered list', () => {\n expect(orderedList({ a: 3, b: 5, c: 1 })).toEqual(['b', 'a', 'c'])\n })\n it('handles empty objects', () => {\n expect(orderedList({})).toEqual([])\n })\n})\n" }, { "alpha_fraction": 0.6656976938247681, "alphanum_fraction": 0.6860465407371521, "avg_line_length": 28.913043975830078, "blob_id": "849de208f843a836d3b8f168b5d2953b0139f661", "content_id": "a5863ce4f705a27cdb8aaf49f671de4cf223bae3", 
"detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 688, "license_type": "permissive", "max_line_length": 70, "num_lines": 23, "path": "/bids-validator/utils/__tests__/collectSubjectMetadata.spec.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { assert } from 'chai'\nimport collectSubjectMetadata from '../summary/collectSubjectMetadata'\n\ndescribe('collectSubjectMetadata', () => {\n it('extracts tsv string to subjectMetadata object', () => {\n const tsvFile = `participant_id\tage\tsex\nsub-01\t34\tF\nsub-02\t38\tM\n`\n const subjectMetadata = collectSubjectMetadata(tsvFile)\n assert.lengthOf(subjectMetadata, 2)\n assert.deepEqual(subjectMetadata[0], {\n participantId: '01',\n age: 34,\n sex: 'F',\n })\n })\n it('extracts tsv string to subjectMetadata object', () => {\n const tsvFile = ``\n const subjectMetadata = collectSubjectMetadata(tsvFile)\n assert.equal(subjectMetadata, undefined)\n })\n})\n" }, { "alpha_fraction": 0.6285959482192993, "alphanum_fraction": 0.6307427883148193, "avg_line_length": 26.081396102905273, "blob_id": "cae732f785704a7d4d565e4882a8e345e81442aa", "content_id": "fe0bacf8f38d42e89166a00209530d711eb2737c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 2329, "license_type": "permissive", "max_line_length": 91, "num_lines": 86, "path": "/bids-validator/src/files/browser.ts", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import { BIDSFile } from '../types/file.ts'\nimport { FileTree } from '../types/filetree.ts'\nimport { FileIgnoreRules } from './ignore.ts'\nimport { parse, join, SEP } from '../deps/path.ts'\n\n/**\n * Browser implement of BIDSFile wrapping native File/FileList types\n */\nexport class BIDSFileBrowser implements BIDSFile {\n #ignore: FileIgnoreRules\n #file: File\n\n constructor(file: File, ignore: FileIgnoreRules) {\n this.#file = file\n this.#ignore = ignore\n }\n\n get name(): string {\n return this.#file.name\n }\n\n get path(): string {\n // @ts-expect-error webkitRelativePath is defined in the browser\n return this.#file.webkitRelativePath\n }\n\n get size(): number {\n return this.#file.size\n }\n\n get stream(): ReadableStream<Uint8Array> {\n return this.#file.stream()\n }\n\n get ignored(): boolean {\n return this.#ignore.test(this.path)\n }\n\n text(): Promise<string> {\n return this.#file.text()\n }\n\n async readBytes(size: number, offset = 0): Promise<Uint8Array> {\n return new Uint8Array(await this.#file.slice(offset, size).arrayBuffer())\n }\n}\n\n/**\n * Convert from FileList (created with webkitDirectory: true) to FileTree for validator use\n */\nexport function fileListToTree(files: File[]): Promise<FileTree> {\n const ignore = new FileIgnoreRules([])\n const tree = new FileTree('', '/', undefined)\n for (const f of files) {\n const file = new BIDSFileBrowser(f, ignore)\n const fPath = parse(file.path)\n const levels = fPath.dir.split(SEP)\n if (levels[0] === '') {\n // Top level file\n tree.files.push(file)\n } else {\n let currentLevelTree = tree\n for (const level of levels) {\n const exists = currentLevelTree.directories.find(\n (d) => d.name === level,\n )\n // If this level exists, set it and descend once\n if (exists) {\n currentLevelTree = exists\n } else {\n // Otherwise make a new level and continue if needed\n const newTree = new FileTree(\n join(currentLevelTree.path, level),\n level,\n currentLevelTree,\n )\n 
currentLevelTree.directories.push(newTree)\n currentLevelTree = newTree\n }\n }\n // At the terminal leaf, add files\n currentLevelTree.files.push(file)\n }\n }\n return Promise.resolve(tree)\n}\n" }, { "alpha_fraction": 0.6069413423538208, "alphanum_fraction": 0.6113865375518799, "avg_line_length": 32.422855377197266, "blob_id": "4cd389e72797cacd6d292e456e929aaa4d2fe9e2", "content_id": "2063dc122a69bda2707f2ed0e9acd29703c498b3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5849, "license_type": "permissive", "max_line_length": 121, "num_lines": 175, "path": "/bids-validator/validators/schemaTypes.js", "repo_name": "bids-standard/bids-validator", "src_encoding": "UTF-8", "text": "import yaml from 'js-yaml'\nimport isNode from '../utils/isNode'\n\n// Version implemented by the internal rules or the included schema version\nconst localVersion = 'v1.6.0'\n\nconst modalities = [\n 'anat',\n 'beh',\n 'dwi',\n 'eeg',\n 'fmap',\n 'func',\n 'ieeg',\n 'meg',\n 'pet',\n]\n\nasync function loadYaml(base, path, local) {\n const url = `${base}/${path}`\n try {\n let text // Loaded raw yaml\n if (local) {\n throw Error('Defaulting to embedded bids-specification schema')\n } else {\n const res = await fetch(url)\n if (res.status !== 200) {\n throw Error(\n `Loading remote bids-specification schema failed, falling back to embedded bids-specification@${localVersion}`,\n )\n }\n text = await res.text()\n }\n return yaml.safeLoad(text)\n } catch (err) {\n if (isNode) {\n const fs = require('fs')\n const text = fs.readFileSync(url, 'utf-8')\n return yaml.safeLoad(text)\n } else {\n // TODO - handle the case where no yaml is available in the browser\n throw Error(\n `Loading remote bids-specification schema failed, and internal validation rules will be used instead`,\n )\n }\n }\n}\n\n/**\n * Load schema files from network or embedded copies\n * @param {string} base Base URL or path\n * @param {boolean} local Avoid any network access\n */\nasync function loadSchema(base, local = false) {\n // Define path prefix depending on the BIDS schema version\n const prefix_objects = base.includes('v1.6.0') ? '' : 'objects/'\n const prefix_rules = base.includes('v1.6.0') ? 
'' : 'rules/'\n const prefix_datatypes = prefix_rules + 'datatypes/'\n\n // Define schema files for top level files and entities\n const top = prefix_rules + 'top_level_files.yaml'\n const entities = prefix_objects + 'entities.yaml'\n\n return {\n top_level_files: await loadYaml(base, top, local),\n entities: await loadYaml(base, entities, local),\n datatypes: {\n anat: await loadYaml(base, prefix_datatypes + 'anat.yaml', local),\n beh: await loadYaml(base, prefix_datatypes + 'beh.yaml', local),\n dwi: await loadYaml(base, prefix_datatypes + 'dwi.yaml', local),\n eeg: await loadYaml(base, prefix_datatypes + 'eeg.yaml', local),\n fmap: await loadYaml(base, prefix_datatypes + 'fmap.yaml', local),\n func: await loadYaml(base, prefix_datatypes + 'func.yaml', local),\n ieeg: await loadYaml(base, prefix_datatypes + 'ieeg.yaml', local),\n meg: await loadYaml(base, prefix_datatypes + 'meg.yaml', local),\n pet: await loadYaml(base, prefix_datatypes + 'pet.yaml', local),\n },\n }\n}\n\n/**\n * Generate matching regular expressions based on the most recent bids-specification schema\n * @param {*} schema Loaded yaml schemas (js-yaml)\n * @param {boolean} pythonRegex Boolean flag to enable/disable Python compatible regex generation\n * @returns\n */\nexport async function generateRegex(schema, pythonRegex = false) {\n // Python regex needs a 'P' before matching group name\n const P = pythonRegex ? 'P' : ''\n const regex = {\n label: '[a-zA-Z0-9]+',\n index: '[0-9]+',\n sub_ses_dirs:\n '^[\\\\/\\\\\\\\](sub-[a-zA-Z0-9]+)[\\\\/\\\\\\\\](?:(ses-[a-zA-Z0-9]+)[\\\\/\\\\\\\\])?',\n type_dir: '[\\\\/\\\\\\\\]',\n sub_ses_entity: '\\\\1(_\\\\2)?',\n optional: '?',\n required: '',\n }\n\n const exportRegex = {\n top_level_files: [],\n datatypes: {\n anat: [],\n beh: [],\n dwi: [],\n eeg: [],\n fmap: [],\n func: [],\n ieeg: [],\n meg: [],\n pet: [],\n },\n }\n\n // Modality agnostic top level files\n for (const root of Object.keys(schema.top_level_files)) {\n const extensions = schema.top_level_files[root].extensions.join('|')\n const root_level = `[\\\\/\\\\\\\\]${root}${\n extensions === 'None' ? '' : `(?${P}<suffix>${extensions})`\n }$`\n exportRegex.top_level_files.push(new RegExp(root_level))\n }\n\n for (const mod of modalities) {\n const modality_datatype_schema = schema.datatypes[mod]\n for (const datatype of Object.keys(modality_datatype_schema)) {\n let file_regex = `${regex.sub_ses_dirs}${mod}${regex.type_dir}${regex.sub_ses_entity}`\n let entities = Object.keys(schema.entities)\n for (const entity of Object.keys(\n modality_datatype_schema[datatype].entities,\n )) {\n if (entities.includes(entity)) {\n const entityDefinion = schema.entities[entity]\n // sub and ses entities in file name handled by directory pattern matching groups\n if (entity === 'subject' || entity === 'session') {\n continue\n }\n const entityKey = entityDefinion.entity\n const format = regex[schema.entities[entity].format]\n if (format) {\n // Limitation here is that if format is missing an essential entity may be skipped\n file_regex += `(?${P}<${entity}>_${entityKey}-${format})${\n regex[modality_datatype_schema[datatype].entities[entity]]\n }`\n }\n }\n }\n const suffix_regex = `_(?${P}<suffix>${modality_datatype_schema[\n datatype\n ].suffixes.join('|')})`\n // Workaround v1.6.0 MEG extension \"*\"\n const wildcard_extensions = modality_datatype_schema[\n datatype\n ].extensions.map((ext) => (ext === '*' ? '.*?' 
: ext))\n const ext_regex = `(?${P}<ext>${wildcard_extensions.join('|')})`\n exportRegex.datatypes[mod].push(\n new RegExp(file_regex + suffix_regex + ext_regex + '$'),\n )\n }\n }\n return exportRegex\n}\n\nexport async function schemaRegex(version = localVersion, options = {}) {\n let schema\n if ('local' in options) {\n schema = await loadSchema('./bids-validator/spec/src/schema', true)\n } else {\n schema = await loadSchema(\n `https://raw.githubusercontent.com/bids-standard/bids-specification/${version}/src/schema`,\n )\n }\n return generateRegex(schema)\n}\n" } ]
198
wlyzcz/SKAARHOJ-Open-Engineering
https://github.com/wlyzcz/SKAARHOJ-Open-Engineering
d186c59ed9ffb79b4bb33222016d3cf7878b80a0
ff9e37156850416d4c2121384eee5b64e99c82b2
7ed289e6a381de2cdc63987c330201a8441e5d06
refs/heads/master
2020-03-07T03:22:21.590095
2018-03-26T20:15:42
2018-03-26T20:15:42
127,233,730
1
0
null
2018-03-29T03:48:27
2018-03-26T20:15:48
2018-03-26T20:15:46
null
[ { "alpha_fraction": 0.5869135856628418, "alphanum_fraction": 0.6232098937034607, "avg_line_length": 23.39759063720703, "blob_id": "993f5d48becb479dde02e61b939c26e73f7a22c4", "content_id": "9e65fe96300aa077e2d3fd3695f27a1e35428c75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4050, "license_type": "no_license", "max_line_length": 111, "num_lines": 166, "path": "/BlackMagic Design Arduino Shield/Sketches/SkaarhojFaderboard/SkaarhojFaderboard.ino", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "#include \"SkaarhojUtils.h\"\n#include \"Streaming.h\"\n#include <BMDSDIControl.h>\n#include \"Wire.h\"\n\nconst int shieldAddress = 0x6E;\n\n//BMD_SDICameraControl_I2C cameraControl(shieldAddress);\nBMD_SDICameraControl_I2C cameraControl;\n\nSkaarhojUtils utils;\nSkaarhojUtils utils2;\n\nvoid setup() {\n Serial.begin(115200);\n Wire.begin();\n \n cameraControl.begin(shieldAddress);\n //cameraControl.begin();\n Wire.setClock(400000L);\n // Init button pins, active -> low\n pinMode(A2, INPUT);\n pinMode(A3, INPUT);\n pinMode(2, INPUT);\n \n // Init LED pins\n pinMode(4, OUTPUT);\n pinMode(5, OUTPUT);\n pinMode(6, OUTPUT);\n pinMode(7, OUTPUT);\n pinMode(8, OUTPUT);\n pinMode(9, OUTPUT);\n \n // Set SDI Override\n cameraControl.setOverride(true);\n \n // Initialize slider\n utils.uniDirectionalSlider_init(40, 35, 35, A0);\n utils.uniDirectionalSlider_disableUnidirectionality(true);\n utils.uniDirectionalSlider_hasMoved();\n\n // Initialize knob\n utils2.uniDirectionalSlider_init(40, 35, 35, A1);\n utils2.uniDirectionalSlider_disableUnidirectionality(true);\n utils2.uniDirectionalSlider_hasMoved(); \n}\n\nuint8_t buttonState[3];\nunsigned long buttonStateChanged[3];\n\nbool buttonHelper(uint8_t button) {\n uint8_t reading;\n switch(button) {\n case 0:\n reading = digitalRead(A2);\n break;\n case 1:\n reading = digitalRead(A3);\n break;\n case 2:\n reading = digitalRead(2);\n break;\n }\n \n if(millis() - buttonStateChanged[button] > 50) {\n if(buttonState[button] == 1) {\n buttonState[button] = 2;\n return true;\n } else if(buttonState[button] == 3) {\n buttonState[button] = 0;\n }\n }\n \n if(reading == LOW && buttonState[button] == 0) {\n buttonState[button] = 1;\n buttonStateChanged[button] = millis();\n }\n \n if(reading == HIGH && buttonState[button] == 2) {\n buttonState[button] = 3;\n buttonStateChanged[button] = millis();\n }\n \n return false;\n}\n\nvoid setLed(uint8_t num, bool red, bool green) {\n switch(num) {\n case 0:\n digitalWrite(4, green);\n digitalWrite(5, red);\n break;\n case 1:\n digitalWrite(6, green);\n digitalWrite(7, red);\n break;\n case 2:\n digitalWrite(8, green);\n digitalWrite(9, red);\n break;\n }\n}\n\n// 0: Knob controls master black , 1-3: Knob controls gamma R,G,B respectively\nuint8_t currentState = 0;\n\nfloat lift[4] = {0.0, 0.0, 0.0, 0.0};\nvoid loop() {\n // Check if slider has moved\n if(utils.uniDirectionalSlider_hasMoved()) {\n Serial << F(\"New slider position\") << utils.uniDirectionalSlider_position() << \"\\n\";\n if(utils.uniDirectionalSlider_isAtEnd()) {\n Serial << F(\"Slider at end\\n\");\n }\n cameraControl.writeCommandFixed16(1, 0, 3, 0, 1.0 - (float)(utils.uniDirectionalSlider_position()/1000.0));\n }\n \n // Check if knob has moved\n if(utils2.uniDirectionalSlider_hasMoved()) {\n Serial << F(\"New knob position\") << utils2.uniDirectionalSlider_position() << \"\\n\";\n if(utils2.uniDirectionalSlider_isAtEnd()) {\n Serial << F(\"Knob at end\\n\");\n }\n 
lift[(currentState+3)%4] = (float)(1.0 - utils2.uniDirectionalSlider_position()/1000.0);\n cameraControl.writeCommandFixed16(1, 8, 0, 0, lift);\n }\n \n if(buttonHelper(0)) {\n if(currentState == 1) {\n currentState = 0;\n setLed(0, false, false);\n } else {\n Serial << F(\"Button 1 pressed\\n\");\n setLed(0, true, false);\n setLed(1, false, false);\n setLed(2, false, false);\n currentState = 1;\n }\n }\n \n if(buttonHelper(1)) {\n if(currentState == 2) {\n currentState = 0;\n setLed(1, false, false);\n } else {\n Serial << F(\"Button 2 pressed\\n\");\n setLed(0, false, false);\n setLed(1, true, false);\n setLed(2, false, false);\n currentState = 2;\n }\n }\n \n if(buttonHelper(2)) {\n if(currentState == 3) {\n currentState = 0;\n setLed(2, false, false);\n } else {\n Serial << F(\"Button 3 pressed\\n\");\n setLed(0, false, false);\n setLed(1, false, false);\n setLed(2, true, false);\n currentState = 3;\n }\n }\n}\n" }, { "alpha_fraction": 0.6073341369628906, "alphanum_fraction": 0.6283477544784546, "avg_line_length": 25.670330047607422, "blob_id": "00644d84f7af5e703030a70987eeb5dca81ec728", "content_id": "3f405310ff2633ba939796174750009cabfabc85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2427, "license_type": "no_license", "max_line_length": 135, "num_lines": 91, "path": "/DeviceCoreFiles/UniSketchTCPClient/TCPserver_joystickExample.py", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport socketserver\nimport socket\nimport base64\nimport re\n\n\n\"\"\"\nTest with JoyStick on XC7+XC3\n\n- Keeps the connection alive\n- \n\"\"\"\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n\t\n\tdef handle(self):\n\t\tself.request.settimeout(1)\n\t\tbusy = False;\t# This keeps track of the busy/ready state of the client. 
We can use this to make sure we are not spamming it with data\n\t\t\n\t\tHWCcolor = [2] * 256\n\t\txSpeed = 0\n\t\tySpeed = 0\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t# self.request is the TCP socket connected to the client\n\t\t\t\tself.data = self.request.recv(1024).strip()\n\t\t\texcept socket.timeout:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif self.data != b'':\n\t\t\t\t\tfor line in self.data.split(b\"\\n\"):\n\t\t\t\t\t\toutputline = \"\"\n\t\t\t\t\t\tprint(\"Client {} sent: '{}<NL>'\".format(self.client_address[0], line.decode('ascii')))\n\n\t\t\t\t\t\tif line == b\"list\":\n\t\t\t\t\t\t\toutputline = \"ActivePanel=1\\n\"\n\t\t\t\t\t\t\tself.panelInitialized = True\n\t\t\t\t\t\t\tbusy = False\n\t\t\t\t\t\tif line == b\"BSY\":\n\t\t\t\t\t\t\tbusy = True\n\n\t\t\t\t\t\tif line == b\"RDY\":\n\t\t\t\t\t\t\tbusy = False\n\n\t\t\t\t\t\tif line == b\"ping\":\n\t\t\t\t\t\t\toutputline = \"ack\\n\"\n\t\t\t\t\t\t\tbusy = False\n\n\t\t\t\t\t\t# Parse down trigger:\n\t\t\t\t\t\tmatch = re.search(r\"^HWC#([0-9]+)=Speed:([\\-0-9]+)$\", line.decode('ascii'))\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tHWc = int(match.group(1));\t# Extract the HWc number of the keypress from the match\n\t\t\t\t\t\t\tif (HWc==44):\n\t\t\t\t\t\t\t\txSpeed = int(match.group(2));\t# Speed\n\t\t\t\t\t\t\tif (HWc==43):\n\t\t\t\t\t\t\t\tySpeed = int(match.group(2));\t# Speed\n\n\t\t\t\t\t\t\tif (xSpeed!=0 or ySpeed!=0):\n\n\t\t\t\t\t\t\t\t# Highlight the button and turn on binary output:\n\t\t\t\t\t\t\t\toutputline = \"HWCt#40={}|||Speeds:|1|x:|y:|{}|0\\n\".format(xSpeed, ySpeed)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\toutputline = \"HWCt#40=|||Speeds:|1|Still\\n\"\n\t\t\t\t\t\t\n\t\t\t\t\t\t# If outputline not empty, send content back to client:\n\t\t\t\t\t\tif outputline:\n\t\t\t\t\t\t\tself.request.sendall(outputline.encode('ascii'))\n\t\t\t\t\t\t\tprint(\"- Returns: '{}'\".format(outputline.replace(\"\\n\",\"<NL>\")))\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"{} closed\".format(self.client_address[0]))\n\t\t\t\t\tbreak\n\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\tpass\n\nHOST, PORT = \"0.0.0.0\", 9923\n\n# Create the server, binding to localhost on port 9923\nserver = ThreadedTCPServer((HOST, PORT), MyTCPHandler, bind_and_activate=False)\nserver.allow_reuse_address = True\nserver.server_bind()\nserver.server_activate()\n\n# Activate the server; this will keep running until you\n# interrupt the program with Ctrl-C\nserver.serve_forever()\n" }, { "alpha_fraction": 0.5154104232788086, "alphanum_fraction": 0.6193040013313293, "avg_line_length": 32.07225799560547, "blob_id": "7f614fa968e70c2122bbcbcd40cdacf1474cb3ae", "content_id": "13d911b9551bb0a436ba1dcfb51a3f2e732892ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 25632, "license_type": "no_license", "max_line_length": 397, "num_lines": 775, "path": "/SKAARDUINO/TestRig/TestRig.ino", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "/**\n * Test application for SKAARDUINO mounted in Test Rig\n *\n * Press and hold the config button to enable extensive test of IO, otherwise it will quickly cycle status LED and test ethernet only.\n */\n\n\n\n// Including libraries:\n#include <SPI.h>\n#include <Wire.h>\n#include <Ethernet.h>\n#include <Streaming.h>\n#ifdef __arm__ /* Arduino DUE */\n#include \"SkaarhojDueEEPROM.h\"\nSkaarhojDueEEPROM EEPROM;\n#else\n#include \"EEPROM.h\"\n#endif\n\n#include <SkaarhojTools.h>\nSkaarhojTools sTools(0);\n\n// MAC address and 
IP address for this *particular* SKAARDUINO\nbyte mac[] = {0x90, 0xA1, 0xDA, 0, 0, 0}; // If last three bytes are zero it will set a random Mac address and store in EEPROM (only if not already set, or if config button is pressed)\nIPAddress ip(192, 168, 10, 0); // If last byte is zero it will set a random IP address and store in EEPROM (only if not already set, or if config button is pressed)\n\nuint8_t slaveAddr = 2; // The Test Rig slave we talk to in this sketch\n\n#include <SkaarhojBufferTools.h>\n#include <SkaarhojTCPServer.h>\nSkaarhojTCPServer TCPServer(8899); // Port 8899 used for telnet server\n\n#include <UDPmessenger.h>\nUDPmessenger messenger;\n\n#include <utility/w5100.h>\n\n#include <SkaarhojPgmspace.h>\n#include <SharedVariables.h>\n\nSharedVariables shareObj(4); // Number of shared variables we allocate memory to handle (every registered variable consumes 12 bytes of memory)\n\n\n// Test shared variables (slave function of this device):\nbool someBoolean = false;\nuint16_t someWord = 65535;\nlong someLong = 65535;\nchar someString[60];\n\n#include <Adafruit_GFX.h>\n#include <SkaarhojDisplayArray.h>\n\nSkaarhojDisplayArray display;\n\n\n\n\n\n// width x height = 128,13\nstatic const uint8_t SKAARHOJ_Logo[] PROGMEM = {\n B00011111, B11111111, B00111000, B00011111, B00000011, B11000000, B00000011, B11000000, B01111111, B11111100, B00111100, B00000111, B10000001, B11111110, B00000000, B00001111,\n B01111111, B11111111, B00111000, B00111110, B00000011, B11100000, B00000011, B11100000, B01111111, B11111111, B00111100, B00000111, B10000111, B11111111, B11000000, B00001111,\n B11111111, B11111111, B00111000, B01111100, B00000111, B11110000, B00000111, B11100000, B01111111, B11111111, B10111100, B00000111, B10011111, B11001111, B11100000, B00001111,\n B11111000, B00000000, B00111000, B11111000, B00000111, B11110000, B00001111, B11110000, B01111000, B00001111, B10111100, B00000111, B10011110, B00000011, B11100000, B00001111,\n B11111000, B00000000, B00111001, B11110000, B00001111, B01111000, B00001111, B01110000, B01111000, B00000111, B10111100, B00000111, B10111110, B00000001, B11100000, B00001111,\n B11111111, B11111100, B00111011, B11100000, B00001111, B01111000, B00011110, B01111000, B01111000, B00011111, B10111111, B11111111, B10111100, B00000001, B11110000, B00001111,\n B01111111, B11111111, B00111111, B11000000, B00011110, B00111100, B00011110, B00111100, B01111111, B11111111, B00111111, B11111111, B10111100, B00000001, B11110000, B00001111,\n B00011111, B11111111, B10111011, B11100000, B00011110, B00111100, B00111110, B00111100, B01111111, B11111110, B00111111, B11111111, B10111100, B00000001, B11110000, B00001111,\n B00000000, B00001111, B10111001, B11110000, B00111111, B11111110, B00111111, B11111110, B01111000, B01111100, B00111100, B00000111, B10111110, B00000001, B11100000, B00001111,\n B00000000, B00001111, B10111000, B11111000, B00111111, B11111110, B01111111, B11111110, B01111000, B00111110, B00111100, B00000111, B10011110, B00000001, B11100000, B00011111,\n B01111111, B11111111, B10111000, B01111100, B01111000, B00001111, B01111000, B00001111, B01111000, B00011111, B00111100, B00000111, B10011111, B10000111, B11000000, B00111110,\n B01111111, B11111111, B00111000, B00111110, B01111000, B00001111, B11110000, B00001111, B01111000, B00001111, B10111100, B00000111, B10001111, B11111111, B10011111, B11111110,\n B01111111, B11111100, B00111000, B00011111, B11110000, B00000111, B11110000, B00000111, B11111000, B00000111, B10111100, B00000111, B10000001, B11111110, 
B00011111, B11110000,\n};\n\n\n\n\n\n\n\n/**\n * Callback function for telnet incoming lines\n * Passes the TCPserver object on towards the shareObj so it can process the input.\n */\nvoid handleTelnetIncoming() {\n shareObj.incomingASCIILine(TCPServer, TCPServer._server);\n\n Serial << F(\"Values:\\n\");\n shareObj.printValues(Serial);\n}\n\n/**\n * Callback function for UDP incoming data\n * Passes the messenger object on towards the shareObj so it can process the input.\n */\nvoid UDPmessengerReceivedCommand(const uint8_t cmd, const uint8_t from, const uint8_t dataLength, const uint8_t *dataArray) {\n shareObj.incomingBinBuffer(messenger, cmd, from, dataLength, dataArray);\n}\n\n/**\n * Callback function for shareObj when an external event from UDP or Telnet has changed a local shared variable\n */\nvoid handleExternalChangeOfValue(uint8_t idx) {\n Serial << F(\"Value idx=\") << idx << F(\" changed: \");\n shareObj.printSingleValue(Serial, idx);\n Serial << F(\"\\n\");\n}\n\n/**\n * Callback function for when slaves return read response data. This data has no place to end up (unless associated with a local shared variable) so it must be handled accordingly.\n */\nint testRigSlaveVariable = 0;\nuint8_t testRigSlaveTestStatus = 0;\nvoid handleExternalUDPReadResponse(const uint8_t slaveIdx, const uint8_t slaveAddress, const uint8_t dataLength, const uint8_t *dataArray) {\n // This will display the incoming data:\n /* Serial << F(\"Read Response from: \") << ip[0] << \".\" << ip[1] << \".\" << ip[2] << \".\" << slaveAddress << F(\": IDX=\") << _HEXPADL(slaveIdx, 2, \"0\") << F(\", DATA=\");\n for (uint8_t i = 0; i < dataLength; i++) {\n Serial << _HEXPADL(dataArray[i], 2, \"0\") << (dataLength > i + 1 ? F(\",\") : F(\"\"));\n }\n Serial.println();\n */\n long temp;\n\n if (slaveAddress == 2) {\n switch (slaveIdx) {\n case 0:\n temp = shareObj.read_int(dataArray);\n // Serial << F(\"Value, int: \") << temp << F(\"\\n\");\n if (temp == testRigSlaveVariable + 1) {\n testRigSlaveTestStatus = 1; // OK\n } else {\n testRigSlaveTestStatus = 2; // ERROR\n }\n break;\n }\n }\n}\n\n\n\n\n\n\n#include <MemoryFree.h>\n\nvoid setup() {\n\n randomSeed(analogRead(0) + analogRead(1) + analogRead(5) + analogRead(6) + analogRead(7));\n\n // Initialize serial communication at 115200 bits per second:\n Serial.begin(115200);\n Serial << F(\"\\n\\n******** SKAARDUINO TEST RIG START ********\\n\");\n\n // Setup:\n pinMode(18, INPUT); // CFG input (active low) - if you set it to INPUT_PULLUP, the resistor on the Bottom part will not be strong enough to pull it down!!\n\n digitalWrite(13, 1); // To prevent brief light in LEDs upon boot\n digitalWrite(15, 1); // To prevent brief light in LEDs upon boot\n digitalWrite(14, 1); // To prevent brief light in LEDs upon boot\n pinMode(13, OUTPUT); // Red Status LED, active low\n pinMode(14, OUTPUT); // Blue Status LED, active low\n pinMode(15, OUTPUT); // Green Status LED, active low\n\n pinMode(10, INPUT); // \"D2\"\n pinMode(11, INPUT); // \"D3\"\n pinMode(12, OUTPUT); // \"D4\"\n pinMode(2, INPUT); // \"D8\"\n pinMode(3, INPUT); // \"D9\"\n pinMode(19, INPUT); // \"D10\"\n\n delay(100);\n\n\n\n\n // I2C / Display setup:\n Serial << F(\"Calling Wire.begin()\\n\");\n Wire.begin();\n\n Serial << F(\"Writing to display\\n\");\n display.begin(4, 1);\n display.clearDisplay(); // clears the screen and buffer\n display.display(B1); // Write to all\n display.setRotation(0);\n\n bool configMode = false;\n\n\n // ***************************\n // Config/Reset button:\n // 
***************************\n\n // Test display:\n Serial << F(\"Test display\\n\");\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.print(\"CFG Button\");\n display.display(B1);\n\n\n // Check CFG input:\n Serial << F(\"\\nTEST: Check if config button is held down after power-up/reset...\\n\");\n if (!digitalRead(18)) { // Config mode, light up status LED in red and stop.\n digitalWrite(13, 0); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 1); // Blue\n Serial << F(\" => Config Mode ON: Status LED is red.\\n\");\n\n display.println(\" = ON!\");\n display.display(B1);\n\n configMode = true;\n } else {\n Serial << F(\" => Config Mode OFF\\n\");\n display.println(\" = Off\");\n display.display(B1);\n }\n delay(500);\n\n\n // Setting MAC address from(/to) EEPROM:\n bool createMACaddress = false;\n char buffer[18];\n if (mac[3] == 0 && mac[4] == 0 && mac[5] == 0) {\n if (configMode || (uint8_t)EEPROM.read(16) != ((EEPROM.read(10) + EEPROM.read(11) + EEPROM.read(12) + EEPROM.read(13) + EEPROM.read(14) + EEPROM.read(15)) & 0xFF)) {\n Serial << F(\"MAC address not found in EEPROM memory (or config mode)! Creating random one...\\n\");\n\n mac[3] = random(0, 256);\n mac[4] = random(0, 256);\n mac[5] = random(0, 256);\n\n // Set MAC address + checksum:\n EEPROM.write(10, mac[0]);\n EEPROM.write(11, mac[1]);\n EEPROM.write(12, mac[2]);\n EEPROM.write(13, mac[3]);\n EEPROM.write(14, mac[4]);\n EEPROM.write(15, mac[5]);\n EEPROM.write(16, (mac[0] + mac[1] + mac[2] + mac[3] + mac[4] + mac[5]) & 0xFF);\n\n sprintf(buffer, \"%02X:%02X:%02X:%02X:%02X:%02X\", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);\n Serial.print(\"Storing SKAARDUINO MAC address: \\n \");\n Serial.print(buffer);\n Serial.print(\" - Checksum: \");\n Serial.println(EEPROM.read(16));\n\n createMACaddress = true;\n }\n\n // Getting from EEPROM:\n mac[0] = EEPROM.read(10);\n mac[1] = EEPROM.read(11);\n mac[2] = EEPROM.read(12);\n mac[3] = EEPROM.read(13);\n mac[4] = EEPROM.read(14);\n mac[5] = EEPROM.read(15);\n }\n sprintf(buffer, \"%02X:%02X:%02X:%02X:%02X:%02X\", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);\n Serial << F(\"SKAARDUINO MAC address: \") << buffer << F(\" - Checksum: \")\n << ((mac[0] + mac[1] + mac[2] + mac[3] + mac[4] + mac[5]) & 0xFF) << \"\\n\";\n\n // Setting IP:\n if (ip[3] == 0) {\n\n if (createMACaddress) {\n Serial << F(\"Creating random IP address ...\\n\");\n\n ip[3] = random(3, 250);\n\n // Set IP address:\n EEPROM.write(2, ip[0]);\n EEPROM.write(3, ip[1]);\n EEPROM.write(4, ip[2]);\n EEPROM.write(5, ip[3]);\n\n sprintf(buffer, \"%d.%d.%d.%d\", ip[0], ip[1], ip[2], ip[3]);\n Serial.print(\"Storing SKAARHOJ Device (Arduino) IP address: \");\n Serial.println(buffer);\n }\n\n ip[0] = EEPROM.read(2);\n ip[1] = EEPROM.read(3);\n ip[2] = EEPROM.read(4);\n ip[3] = EEPROM.read(5);\n }\n sprintf(buffer, \"%d.%d.%d.%d\", ip[0], ip[1], ip[2], ip[3]);\n Serial << F(\"SKAARDUINO IP address: \") << buffer << \"\\n\";\n\n\n if (configMode) {\n delay(2000);\n digitalWrite(13, 1); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 1); // Blue\n delay(200);\n }\n\n\n // ***************************\n // Cycle LEDs\n // ***************************\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"Cycling Status LED\");\n 
display.display(B1);\n\n\n // Test status LED: (cycled colors, ends with off):\n Serial << F(\"\\nTEST: Cycles Status LED colors\\n\");\n for (uint8_t a = 1; a <= 7; a++) {\n statusLed(a);\n delay(300);\n }\n statusLed(0);\n delay(500);\n\n\n\n if (configMode) {\n\n\n // ***************************\n // Digital IO check\n // ***************************\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"#1 Digital IO Check\");\n display.display(B1);\n\n bool errorFlag = false;\n Serial << F(\"\\nTEST #1: Digital pin 2 and 3...\\n\");\n\n pinMode(2, OUTPUT); // \"D8\"\n pinMode(3, INPUT); // \"D9\"\n digitalWrite(2, 1);\n delay(20);\n if (digitalRead(3) != 1) {\n Serial << F(\" => ERROR: D2 was set high, but D3 didn't follow!\\n\");\n errorFlag = true;\n }\n digitalWrite(2, 0);\n delay(20);\n if (digitalRead(3) != 0) {\n Serial << F(\" => ERROR: D2 was set low, but D3 didn't follow!\\n\");\n errorFlag = true;\n }\n\n pinMode(2, INPUT); // \"D8\"\n pinMode(3, OUTPUT); // \"D9\"\n digitalWrite(3, 1);\n delay(20);\n if (digitalRead(2) != 1) {\n Serial << F(\" => ERROR: D3 was set high, but D2 didn't follow!\\n\");\n errorFlag = true;\n }\n digitalWrite(3, 0);\n delay(20);\n if (digitalRead(2) != 0) {\n Serial << F(\" => ERROR: D3 was set low, but D2 didn't follow!\\n\");\n errorFlag = true;\n }\n\n if (!errorFlag) Serial << F(\" => OK: D2 and D3 worked\\n\");\n flashStatusLed(errorFlag ? 1 : 2);\n delay(500);\n\n\n\n\n\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"#2 Digital IO Check\");\n display.display(B1);\n\n errorFlag = false;\n Serial << F(\"\\nTEST #2: Digital pin 10 and 11...\\n\");\n\n pinMode(10, OUTPUT); // \"D2\"\n pinMode(11, INPUT); // \"D3\"\n digitalWrite(10, 1);\n delay(20);\n if (digitalRead(11) != 1) {\n Serial << F(\" => ERROR: D10 was set high, but D11 didn't follow!\\n\");\n errorFlag = true;\n }\n digitalWrite(10, 0);\n delay(20);\n if (digitalRead(11) != 0) {\n Serial << F(\" => ERROR: D10 was set low, but D11 didn't follow!\\n\");\n errorFlag = true;\n }\n\n pinMode(10, INPUT); // \"D2\"\n pinMode(11, OUTPUT); // \"D3\"\n digitalWrite(11, 1);\n delay(20);\n if (digitalRead(10) != 1) {\n Serial << F(\" => ERROR: D11 was set high, but D10 didn't follow!\\n\");\n errorFlag = true;\n }\n digitalWrite(11, 0);\n delay(20);\n if (digitalRead(10) != 0) {\n Serial << F(\" => ERROR: D11 was set low, but D10 didn't follow!\\n\");\n errorFlag = true;\n }\n\n if (!errorFlag) Serial << F(\" => OK: D10 and D11 worked\\n\");\n flashStatusLed(errorFlag ? 
1 : 2);\n delay(500);\n\n\n\n // ***************************\n // Serial 1:\n // ***************************\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"#3: Serial1 check\");\n display.display(B1);\n\n // Initialize serial communication at 115200 bits per second:\n Serial << F(\"\\nTEST #3: Check Serial1\\n\");\n Serial1.begin(115200);\n Serial1 << F(\"A\");\n delay(50);\n if (Serial1.available()) {\n int inByte = Serial1.read();\n if (inByte == 65) {\n Serial << F(\" => OK: Serial input was correct\\n\");\n flashStatusLed(2);\n } else {\n Serial << F(\" => ERROR: Serial1 input was not 'A' but '\") << (char)inByte << F(\"'\\n\");\n flashStatusLed(1);\n }\n } else {\n Serial << F(\" => ERROR: No Serial input data\\n\");\n flashStatusLed(1);\n }\n delay(500);\n\n\n\n\n\n\n // ***************************\n // Analog\n // ***************************\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"#4 Analog check\");\n display.display(B1);\n\n // Initialize serial communication at 115200 bits per second:\n Serial << F(\"\\nTEST #4: Analog inputs\\n\");\n errorFlag = false;\n\n digitalWrite(12, HIGH);\n delay(2000);\n if (!isAround(analogRead(2), 878)) {\n Serial << F(\" => ERROR: analogRead(2) was not around 878 but \") << analogRead(2) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(3), 658)) {\n Serial << F(\" => ERROR: analogRead(3) was not around 658 but \") << analogRead(3) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(4), 439)) {\n Serial << F(\" => ERROR: analogRead(4) was not around 439 but \") << analogRead(4) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(0), 219)) {\n Serial << F(\" => ERROR: analogRead(0) was not around 219 but \") << analogRead(0) << F(\" instead\\n\");\n errorFlag = true;\n }\n\n digitalWrite(12, LOW);\n delay(2000);\n if (!isAround(analogRead(2), 0)) {\n Serial << F(\" => ERROR: analogRead(2) was not around 0 but \") << analogRead(2) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(3), 0)) {\n Serial << F(\" => ERROR: analogRead(3) was not around 0 but \") << analogRead(3) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(4), 0)) {\n Serial << F(\" => ERROR: analogRead(4) was not around 0 but \") << analogRead(4) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(0), 0)) {\n Serial << F(\" => ERROR: analogRead(0) was not around 0 but \") << analogRead(0) << F(\" instead\\n\");\n errorFlag = true;\n }\n\n analogWrite(12, 128);\n delay(2000);\n if (!isAround(analogRead(2), 439)) {\n Serial << F(\" => ERROR: analogRead(2) was not around 439 but \") << analogRead(2) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(3), 329)) {\n Serial << F(\" => ERROR: analogRead(3) was not around 329 but \") << analogRead(3) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(4), 219)) {\n Serial << F(\" => ERROR: analogRead(4) was not around 219 but \") << analogRead(4) << F(\" instead\\n\");\n errorFlag = true;\n }\n if (!isAround(analogRead(0), 109)) {\n Serial << F(\" => ERROR: analogRead(0) was not around 109 but \") << analogRead(0) << F(\" instead\\n\");\n errorFlag = true;\n }\n\n // TODO: Add 
check for D10\n\n if (!errorFlag) Serial << F(\" => OK: Analog inputs worked\\n\");\n flashStatusLed(errorFlag ? 1 : 2);\n delay(500);\n\n }\n\n // ***************************\n // Start the Ethernet:\n // ***************************\n // Test display:\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 17);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"#5 Ethernet Test\");\n display << F(\"IP: \") << ip;\n display.display(B1);\n\n\n Ethernet.begin(mac, ip);\n delay(1000);\n\n Serial << F(\"\\nEthernet IP: \") << ip << F(\"\\n\");\n Serial << F(\"UDP Shared Variables server/slave on port 8765\\n\");\n Serial << F(\"Telnet server on port 8899\\n\");\n\n W5100.setRetransmissionTime(0xD0); // Milli seconds\n W5100.setRetransmissionCount(1);\n\n // Initialize UDP messenger and connect to shareObj through callback:\n messenger.registerReceptionCallbackFunction(UDPmessengerReceivedCommand);\n messenger.begin(ip, 8765); // Port number to listen on for UDP packets\n messenger.serialOutput(true); // Remove or comment\n\n shareObj.setExternalChangeOfVariableCallback(handleExternalChangeOfValue);\n shareObj.setExternalReadResponseCallback(handleExternalUDPReadResponse);\n\n // NOTE: Names (the first \"PSTR()\" in the argument list of shareLocalVariable()) for sharing variables is the string by which the variable is referenced via the buffer interface - make sure not to name two variables the same or substrings of each other, for instance dont use names like \"My Var\" and \"My Var2\" because \"My Var\" is a substring of \"My Var2\". This may lead to unexpected behaviours.\n shareObj.shareLocalVariable(0, someBoolean, 3, PSTR(\"Test Bool\"), PSTR(\"\"));\n shareObj.shareLocalVariable(1, someWord, 3, PSTR(\"Test Word\"), PSTR(\"\"));\n shareObj.shareLocalVariable(2, someLong, 3, PSTR(\"Test Long\"), PSTR(\"\"));\n shareObj.shareLocalVariable(3, someString, 3, sizeof(someString), PSTR(\"Test String\"), PSTR(\"\"));\n\n // UDP SV Master configuration:\n shareObj.messengerObject(messenger);\n\n // Set up tracking of addresses toward which we will play master and ask for values.\n messenger.trackAddress(slaveAddr);\n\n\n // Test connection over UDP to test rig slave:\n Serial << F(\"\\nTEST #5: Sending UDP data (SharedVariables) to test rig slave on 192.168.10.2...\\n\");\n testRigSlaveVariable = random(-10000, 10000);\n testRigSlaveTestStatus = 0;\n shareObj.startBundle(slaveAddr);\n shareObj.setRemoteVariableOverUDP((int)testRigSlaveVariable, slaveAddr, 0);\n shareObj.getRemoteVariableOverUDP(slaveAddr, 0);\n shareObj.endBundle();\n lDelay(500);\n if (testRigSlaveTestStatus == 1) {\n Serial << F(\" => OK: Ethernet UDP data exchange with test rig slave on 192.168.10.2\\n\");\n flashStatusLed(2);\n } else if (testRigSlaveTestStatus == 2) {\n Serial << F(\" => ERROR: Response OK, but wrong value!\\n\");\n flashStatusLed(1);\n } else {\n Serial << F(\" => ERROR: No response to UDP message sent to test rig slave\\n\");\n flashStatusLed(1);\n }\n\n\n\n\n // Done with initial tests:\n delay(1000);\n\n // Test display:\n display.clearDisplay();\n display.drawBitmap(0, 0, SKAARHOJ_Logo, 128, 13, WHITE);\n display.setCursor(0, 18);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display.println(\"DONE\");\n display.display(B1);\n\n Serial << F(\"\\nInitial Linear Test done, freeMemory()=\") << freeMemory() << \"\\n\";\n flashStatusLed(4);\n flashStatusLed(4);\n flashStatusLed(4);\n delay(2000);\n}\n\nunsigned long 
runLoopCounter, prev_runLoopCounter, prevTime;\nunsigned long errorInstances = 0;\nbool ethernetError = false;\n\nvoid loop() {\n\n lDelay(0); // Runloops\n\n // Rotate colors on Status LED, so we can see the run loop is alive!\n if (errorInstances > 0) {\n digitalWrite(13, (((millis() >> 8) & 0x1) != 0) || !((millis() >> 9) % (errorInstances + 1))); // Red\n digitalWrite(15, (((millis() >> 8) & 0x1) != 0) || !((millis() >> 9) % (errorInstances + 1))); // Green\n digitalWrite(14, 1); // Blue\n } else {\n // digitalWrite(13, ((millis() >> 8) & 0x3) != 0); // Red\n // digitalWrite(15, ((millis() >> 8) & 0x3) != 1); // Green\n // digitalWrite(14, ((millis() >> 8) & 0x3) != 2); // Blue\n digitalWrite(13, 1); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, ((millis() >> 8) & 0x1)); // Blue\n }\n\n // Talk to test rig slave:\n testRigSlaveVariable = random(-10000, 10000);\n testRigSlaveTestStatus = 0;\n shareObj.startBundle(slaveAddr);\n shareObj.setRemoteVariableOverUDP((int)testRigSlaveVariable, slaveAddr, 0);\n shareObj.getRemoteVariableOverUDP(slaveAddr, 0);\n shareObj.endBundle();\n\n unsigned long timer = millis();\n do {\n TCPServer.runLoop(); // Keep TCPServer running\n messenger.runLoop(); // Listening for UDP incoming\n }\n while (testRigSlaveTestStatus == 0 && !sTools.hasTimedOut(timer, 200));\n\n if (testRigSlaveTestStatus != 1) {\n flashStatusLed(1);\n flashStatusLed(1);\n flashStatusLed(1);\n Serial << F(\" => ERROR: UDP message response testRigSlaveTestStatus != 1. testRigSlaveTestStatus=\") << testRigSlaveTestStatus << F(\"\\n\");\n if (!ethernetError) {\n errorInstances++;\n ethernetError = true;\n }\n } else {\n ethernetError = false;\n }\n\n if (millis() / 1000 != prevTime) {\n prevTime = millis() / 1000;\n prev_runLoopCounter = runLoopCounter;\n runLoopCounter = 0;\n\n Serial << F(\"RunLoopCount: \") << prev_runLoopCounter << F(\" - \") << F(\"Error Instances:\") << errorInstances << F(\"\\n\");\n }\n\n // Test display:\n display.clearDisplay();\n display.setCursor(0, 0);\n display.setTextColor(WHITE);\n display.setTextSize(1);\n display << F(\"RunLoopCount: \") << prev_runLoopCounter << F(\"\\n\");\n display << F(\"Error Instances:\") << errorInstances << F(\"\\n\");\n display.setTextSize(2);\n display << millis();\n display.setTextSize(1);\n display << \" A:\" << analogRead(0);\n display.display(B1);\n\n\n runLoopCounter++;\n}\n\n\n/**\n * Use this to make delays (because it will continously read digital inputs and update those counters!)\n */\nvoid lDelay(uint16_t delayVal) {\n unsigned long timer = millis();\n do {\n TCPServer.runLoop(); // Keep TCPServer running\n messenger.runLoop(); // Listening for UDP incoming\n }\n while (delayVal > 0 && !sTools.hasTimedOut(timer, delayVal));\n}\n\nvoid statusLed(uint8_t color) {\n switch (color) {\n case 1: // red\n digitalWrite(13, 0); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 1); // Blue\n break;\n case 2: // green\n digitalWrite(13, 1); // Red\n digitalWrite(15, 0); // Green\n digitalWrite(14, 1); // Blue\n break;\n case 3: // blue\n digitalWrite(13, 1); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 0); // Blue\n break;\n case 4: // white\n digitalWrite(13, 0); // Red\n digitalWrite(15, 0); // Green\n digitalWrite(14, 0); // Blue\n break;\n case 5: // yellow\n digitalWrite(13, 0); // Red\n digitalWrite(15, 0); // Green\n digitalWrite(14, 1); // Blue\n break;\n case 6: // cyan\n digitalWrite(13, 1); // Red\n digitalWrite(15, 0); // Green\n digitalWrite(14, 0); // Blue\n break;\n case 7: // 
magenta\n digitalWrite(13, 0); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 0); // Blue\n break;\n default: // off\n digitalWrite(13, 1); // Red\n digitalWrite(15, 1); // Green\n digitalWrite(14, 1); // Blue\n break;\n }\n}\n\n\nvoid flashStatusLed(uint8_t color) {\n statusLed(color);\n delay(500);\n statusLed(0);\n delay(500);\n}\n\nbool isAround(int input, int around) {\n return (input + 10 > around && input - 10 < around);\n}\n\n" }, { "alpha_fraction": 0.6333839297294617, "alphanum_fraction": 0.6506828665733337, "avg_line_length": 31.623762130737305, "blob_id": "b0c76a0617d8c28b25cd6c8f5995d86ea976e18c", "content_id": "937cdf8cdcedce295ea70f52896bde5b1340c798", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3295, "license_type": "no_license", "max_line_length": 286, "num_lines": 101, "path": "/DeviceCoreFiles/UniSketchTCPClient/TCPserver_triggerResponse.py", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport socketserver\nimport socket\nimport base64\nimport re\n\n\n\"\"\"\nA simple server for UniSketch TCP Client\n\n- Keeps the connection alive\n- Turns on light in reported HWcs\n- Receives button presses and cycles colors, highlights/dims buttons\n\"\"\"\n\nclass MyTCPHandler(socketserver.BaseRequestHandler):\n\t\n\tdef handle(self):\n\t\tself.request.settimeout(1)\n\t\tbusy = False;\t# This keeps track of the busy/ready state of the client. We can use this to make sure we are not spamming it with data\n\t\t\n\t\tHWCcolor = [2] * 256\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\t# self.request is the TCP socket connected to the client\n\t\t\t\tself.data = self.request.recv(1024).strip()\n\t\t\texcept socket.timeout:\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tif self.data != b'':\n\t\t\t\t\tfor line in self.data.split(b\"\\n\"):\n\t\t\t\t\t\toutputline = \"\"\n\t\t\t\t\t\tprint(\"Client {} sent: '{}<NL>'\".format(self.client_address[0], line.decode('ascii')))\n\n\t\t\t\t\t\tif line == b\"list\":\n\t\t\t\t\t\t\toutputline = \"ActivePanel=1\\n\"\n\t\t\t\t\t\t\tself.panelInitialized = True\n\t\t\t\t\t\t\tbusy = False\n\t\t\t\t\t\tif line == b\"BSY\":\n\t\t\t\t\t\t\tbusy = True\n\n\t\t\t\t\t\tif line == b\"RDY\":\n\t\t\t\t\t\t\tbusy = False\n\n\t\t\t\t\t\tif line == b\"ping\":\n\t\t\t\t\t\t\toutputline = \"ack\\n\"\n\t\t\t\t\t\t\tbusy = False\n\n\t\t\t\t\t\t# Parse map= and turn on the button in dimmed mode for each. \n\t\t\t\t\t\t# We could use the data from map to track which HWcs are active on the panel\n\t\t\t\t\t\tmatch = re.search(r\"^map=([0-9]+):([0-9]+)$\", line.decode('ascii'))\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tHWc = int(match.group(2));\t# Extract the HWc number of the keypress from the match\n\t\t\t\t\t\t\toutputline = \"HWC#{}={}\\n\".format(HWc,5)\n\n\t\t\t\t\t\t# Parse down trigger:\n\t\t\t\t\t\tmatch = re.search(r\"^HWC#([0-9]+)=Down$\", line.decode('ascii'))\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tHWc = int(match.group(1));\t# Extract the HWc number of the keypress from the match\n\n\t\t\t\t\t\t\t# Highlight the button and turn on binary output:\n\t\t\t\t\t\t\toutputline = \"HWC#{}={}\\n\".format(HWc, 4 | 0x20)\n\n\t\t\t\t\t\t\t# Rotate color number:\n\t\t\t\t\t\t\tHWCcolor[HWc] = (HWCcolor[HWc] + 1)%17;\t# Rotate the internally stored color number\n\t\t\t\t\t\t\toutputline = outputline + \"HWCc#{}={}\\n\".format(HWc,HWCcolor[HWc] | 0x80)\t# OR'ing 0x80 to identify the color as an externally imposed color. 
By default the least significant 6 bits will be an index to a color, but you can OR 0x40 and it will instead accept a rrggbb combination.\n\n\t\t\t\t\t\t# Parse Up trigger:\n\t\t\t\t\t\tmatch = re.search(r\"^HWC#([0-9]+)=Up$\", line.decode('ascii'))\n\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\tHWc = int(match.group(1));\t# Extract the HWc number of the keypress from the match\n\n\t\t\t\t\t\t\t# Dim the button:\n\t\t\t\t\t\t\toutputline = \"HWC#{}={}\\n\".format(HWc,5)\n\n\t\t\t\t\t\t# If outputline not empty, send content back to client:\n\t\t\t\t\t\tif outputline:\n\t\t\t\t\t\t\tself.request.sendall(outputline.encode('ascii'))\n\t\t\t\t\t\t\tprint(\"- Returns: '{}'\".format(outputline.replace(\"\\n\",\"<NL>\")))\n\n\t\t\t\telse:\n\t\t\t\t\tprint(\"{} closed\".format(self.client_address[0]))\n\t\t\t\t\tbreak\n\n\n\nclass ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):\n\tpass\n\nHOST, PORT = \"0.0.0.0\", 9923\n\n# Create the server, binding to localhost on port 9923\nserver = ThreadedTCPServer((HOST, PORT), MyTCPHandler, bind_and_activate=False)\nserver.allow_reuse_address = True\nserver.server_bind()\nserver.server_activate()\n\n# Activate the server; this will keep running until you\n# interrupt the program with Ctrl-C\nserver.serve_forever()\n" }, { "alpha_fraction": 0.675226092338562, "alphanum_fraction": 0.7117248177528381, "avg_line_length": 39.47058868408203, "blob_id": "aca2c40251a2eb867568ffaf86f83a240a9fbd06", "content_id": "b04f5ef23a2ca9eb6ad37a2710d789bc03c63008", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6192, "license_type": "no_license", "max_line_length": 397, "num_lines": 153, "path": "/SKAARDUINO/TestRig_slaveMCU/TestRig_slaveMCU.ino", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "/*****************\n * Example: Basic Shared Variables with UDP messenger (and TCP Telnet server)\n * With this example, the Arduino becomes a UDP messenger server/slave (and Telnet Server as an extra feature, not mandatory) on 192.168.10.99 through which you can get and set values of the shared variables\n *\n * - kasper\n */\n/*****************\n * TO MAKE THIS EXAMPLE WORK:\n * - You must have an Arduino with Ethernet Shield (or compatible such as \"Arduino Ethernet\", http://arduino.cc/en/Main/ArduinoBoardEthernet)\n * - You must make specific set ups in the below lines where the comment \"// SETUP\" is found!\n */\n\n/*\n\tIMPORTANT: If you want to use this library in your own projects and/or products,\n \tplease play a fair game and heed the license rules! See our web page for a Q&A so\n \tyou can keep a clear conscience: http://skaarhoj.com/about/licenses/\n */\n\n\n\n// Including libraries: \n#include <SPI.h>\n#include <Ethernet.h>\n#include <Streaming.h>\n\n\n// MAC address and IP address for this *particular* Arduino / Ethernet Shield!\n// The MAC address is printed on a label on the shield or on the back of your device\nbyte mac[] = { \n 0x10, 0xA2, 0xDA, 0xDE, 0x6E, 0x79 }; // <= SETUP! MAC address of the Arduino\nIPAddress ip(192, 168, 10, 2); // <= SETUP! 
IP address of the Arduino\n\n\n#include <SkaarhojBufferTools.h>\n\n#include <SkaarhojTCPServer.h>\nSkaarhojTCPServer TCPServer(8899); // Port 8899 used for telnet server\n\n#include <UDPmessenger.h>\nUDPmessenger messenger;\n\n#include <SkaarhojPgmspace.h>\n#include <SharedVariables.h>\n\nSharedVariables shareObj(12); // Number of shared variables we allocate memory to handle (every registered variable consumes 12 bytes of memory)\n\n\n\n// Test shared variables:\nint test_int = 10;\nbool test_bool = true;\nuint8_t test_uint8_t = 12;\nuint16_t test_uint16_t = 12345;\nlong test_long = -1171510507;\nunsigned long test_unsigned_long = 3123456789;\nfloat test_float = 3.14159265;\nchar test_char = 'A';\nchar test_string[] = \"ABCDEF\";\nuint8_t test_array[] = {1, 2, 3, 4};\nint test_array_int[] = { -1, -2000, 30, 30000};\n\nuint16_t test_uint16_t_array[] = {1000, 2000, 3000, 4000}; // Used to just pass a single position...\n\n\n\n\n/**\n * Callback function for telnet incoming lines\n * Passes the TCPserver object on towards the shareObj so it can process the input.\n */\nvoid handleTelnetIncoming() {\n shareObj.incomingASCIILine(TCPServer, TCPServer._server);\n}\n\n/**\n * Callback function for UDP incoming data\n * Passes the messenger object on towards the shareObj so it can process the input.\n */\nvoid UDPmessengerReceivedCommand(const uint8_t cmd, const uint8_t from, const uint8_t dataLength, const uint8_t *dataArray) {\n shareObj.incomingBinBuffer(messenger, cmd, from, dataLength, dataArray);\n}\n\n/**\n * Callback function for shareObj when an external event from UDP or Telnet has changed a local shared variable\n */\nvoid handleExternalChangeOfValue(uint8_t idx) {\n Serial << F(\"Value idx=\") << idx << F(\" changed: \");\n shareObj.printSingleValue(Serial, idx);\n Serial << F(\"\\n\");\n\n if (idx==0) { // Values of idx 0 is incremented immediately.\n test_int++;\n }\n}\n\n\n\n\n#include <MemoryFree.h>\n\nvoid setup() {\n \n // Start the Ethernet:\n Ethernet.begin(mac,ip);\n delay(1000);\n\n // Initialize serial communication at 9600 bits per second:\n Serial.begin(115200);\n Serial.println(\"\\n\\n******* START ********\");\n Serial.println(\"UDP SLAVE on port 8765\\n**********************\");\n\n\n // Initialize Telnet Server and connect to shareObj through callback:\n TCPServer.begin();\n TCPServer.setHandleIncomingLine(handleTelnetIncoming); // Put only the name of the function\n TCPServer.serialOutput(3);\n\n // Initialize UDP messenger and connect to shareObj through callback:\n messenger.registerReceptionCallbackFunction(UDPmessengerReceivedCommand);\n messenger.begin(ip, 8765); // Port number to listen on for UDP packets\n messenger.serialOutput(true); // Remove or comment\n \n shareObj.setExternalChangeOfVariableCallback(handleExternalChangeOfValue);\n\n // NOTE: Names (the first \"PSTR()\" in the argument list of shareLocalVariable()) for sharing variables is the string by which the variable is referenced via the buffer interface - make sure not to name two variables the same or substrings of each other, for instance dont use names like \"My Var\" and \"My Var2\" because \"My Var\" is a substring of \"My Var2\". 
This may lead to unexpected behaviours.\n shareObj.shareLocalVariable(0, test_int, 3, PSTR(\"Test int\"), PSTR(\"A test integer\"), -1000, 1000);\n/* shareObj.shareLocalVariable(1, test_bool, 1, PSTR(\"Test bool\"), PSTR(\"A test boolean\"));\n shareObj.shareLocalVariable(2, test_uint8_t, 3, PSTR(\"Test uint8_t\"), PSTR(\"A test uint8_t\"), 1, 100);\n shareObj.shareLocalVariable(3, test_uint16_t, 3, PSTR(\"Test uint16_t\"), PSTR(\"A test uint16_t\"), 1, 40000);\n shareObj.shareLocalVariable(4, test_long, 3, PSTR(\"Test long\"), PSTR(\"A test long\"));\n shareObj.shareLocalVariable(5, test_unsigned_long, 3, PSTR(\"Test unsigned long\"), PSTR(\"A test unsigned long\"));\n shareObj.shareLocalVariable(6, test_float, 3, PSTR(\"Test float\"), PSTR(\"A test float\"), -10, 10);\n shareObj.shareLocalVariable(7, test_char, 3, PSTR(\"Test char\"), PSTR(\"A test char\"));\n shareObj.shareLocalVariable(8, test_string, sizeof(test_string), 3, PSTR(\"Test string\"), PSTR(\"A test string\"));\n shareObj.shareLocalVariable(9, test_array, sizeof(test_array), 3, PSTR(\"Test array, uint8_t\"), PSTR(\"A test array with single bytes\"));\n shareObj.shareLocalVariable(10, test_array_int, sizeof(test_array_int), 3, PSTR(\"Test array, integer\"), PSTR(\"A test array with integers\"));\n shareObj.shareLocalVariable(11, test_uint16_t_array[1], 3, PSTR(\"Test element uint16_t_array[1]\"), PSTR(\"Testing an individual position of an arrayTesting an individual position of an arrayTesting an individual position of an array\"), 1, 10000);\n*/\n Serial << F(\"Overview of variables:\\n\");\n shareObj.printOverview(Serial);\n Serial << F(\"Values:\\n\");\n shareObj.printValues(Serial);\n\n\n\n\n Serial << F(\"freeMemory()=\") << freeMemory() << \"\\n\";\n}\n\nvoid loop() {\n TCPServer.runLoop(); // Keep TCPServer running\n messenger.runLoop(); // Listening for UDP incoming\n}\n" }, { "alpha_fraction": 0.772870659828186, "alphanum_fraction": 0.772870659828186, "avg_line_length": 78.5, "blob_id": "4fea912c17d6c39e5b843a353ffd5ca2b6e87332", "content_id": "4fb360b264dd8a75bd1042b3c96f58b778996fa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 329, "license_type": "no_license", "max_line_length": 200, "num_lines": 4, "path": "/Various/Eagle/Notes.txt", "repo_name": "wlyzcz/SKAARHOJ-Open-Engineering", "src_encoding": "UTF-8", "text": "The SKAARHOJ.lbr eagle library can be included in EAGLE by:\n- opening EAGLE\n- Chose the menu Options > Directories >\n- For “Libraries”, add the path to the SKAARHOJ library with a semicolon, for instance “$EAGLEDIR/lbr:/Path-to-directory/Eagle” where “Eagle” is the directory in which you store the SKAARHOJ.lbr file." } ]
6
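The UniSketch TCP server script in the SKAARHOJ-Open-Engineering record above parses only a handful of line-based commands ("list", "ping", "BSY"/"RDY", "map=<p>:<n>" and "HWC#<n>=Down/Up") and answers with "HWC#"/"HWCc#" state lines. As an illustration of that protocol, here is a minimal throwaway client for exercising that example server on localhost; it is an assumption-based sketch (host, port number reuse, and the HWC number 17 are arbitrary), not the real UniSketch TCP client.

# Minimal test client for the example server above (not the real UniSketch client):
# it speaks just enough of the protocol the server parses -- "list", "map=",
# and "HWC#<n>=Down/Up" lines -- to trigger the colour-cycling reply.
import socket

HOST, PORT = "127.0.0.1", 9923   # assumes the server script is running locally

with socket.create_connection((HOST, PORT), timeout=5) as sock:
    f = sock.makefile("rwb")

    def send(line):
        f.write((line + "\n").encode("ascii"))
        f.flush()
        print("sent:", line)

    send("list")           # server replies "ActivePanel=1"
    send("map=1:17")       # report HWC#17 as present; server dims its LED
    send("HWC#17=Down")    # press -> server highlights it and cycles the colour
    send("HWC#17=Up")      # release -> server dims the LED again

    # Collect whatever the server sent back, then give up after a quiet second.
    sock.settimeout(1)
    try:
        while True:
            data = sock.recv(1024)
            if not data:
                break
            print("received:", data.decode("ascii").replace("\n", "<NL>"))
    except socket.timeout:
        pass
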
ishita159/OpenCV-and-deep-learning
https://github.com/ishita159/OpenCV-and-deep-learning
54cdb625ffc0c6e858d453a3c574c2fff4aab37a
b539f8073b1386a7aa5f642d71e660574c2b68cd
4848e6d8320f5d64c74faf0825a8b01e8bb3c814
refs/heads/master
2022-11-12T23:29:29.455794
2020-06-24T11:42:25
2020-06-24T11:42:25
274,653,598
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7476190328598022, "avg_line_length": 29.14285659790039, "blob_id": "6420d5f426a88b8c018995589f3bc66494ba321c", "content_id": "5edc83d420c7b07a27db49668dac4f43eaa1198f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 63, "num_lines": 7, "path": "/01-Image-Basics-with-OpenCV/.ipynb_checkpoints/01-Opening-Image-Files-OpenCV-checkpoint.py", "repo_name": "ishita159/OpenCV-and-deep-learning", "src_encoding": "UTF-8", "text": "import cv2\n\nimg = cv2.imread('../DATA/00-puppy.jpg',cv2.IMREAD_GRAYSCALE)\n# Show the image with OpenCV\ncv2.imshow('window_name',img)\n# Wait for something on keyboard to be pressed to close window.\ncv2.waitKey()" } ]
1
arthurzenika/checkov
https://github.com/arthurzenika/checkov
d7fabcbdff8188d26bbdaf820927e3568e487521
e490780f294cae91782aa2fb44418e2865cb7c9c
561f20d521cf40c467829e3070a14b5736651963
refs/heads/master
2023-07-05T19:23:01.490356
2021-08-18T15:05:03
2021-08-18T15:17:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4117647111415863, "alphanum_fraction": 0.7058823704719543, "avg_line_length": 16, "blob_id": "3187a8237ad5993146ab88f1d29b1b45635d3e99", "content_id": "7c1a0b08aae9ac456439711212b5158163915974", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 17, "license_type": "permissive", "max_line_length": 16, "num_lines": 1, "path": "/kubernetes/requirements.txt", "repo_name": "arthurzenika/checkov", "src_encoding": "UTF-8", "text": "checkov==2.0.363\n" }, { "alpha_fraction": 0.3499999940395355, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 19, "blob_id": "d858aa8cdb13379a2444f620c36cff7228b3f245", "content_id": "9b3583523b46c8616286fe555f71e8dd8cdaae9b", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20, "license_type": "permissive", "max_line_length": 19, "num_lines": 1, "path": "/checkov/version.py", "repo_name": "arthurzenika/checkov", "src_encoding": "UTF-8", "text": "version = '2.0.363'\n" } ]
2
jlz293/AI-Puzzle-Project
https://github.com/jlz293/AI-Puzzle-Project
52a11d2999d82d9a8860d1cdb55fb5cec9f33f0f
9671251bc2da770cfd10614d972b922cf26f440e
55a205d8ad4744507975c05f72f55eca037c74dc
refs/heads/master
2021-05-23T14:17:27.007892
2020-05-09T20:15:23
2020-05-09T20:15:23
253,334,230
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5803619623184204, "alphanum_fraction": 0.5879254341125488, "avg_line_length": 29.46502113342285, "blob_id": "a7bd2a1405c5f43f9aed73e5e196a6144cb99f14", "content_id": "969c5402d6e2e548045790ac3d4e45151b21120a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7404, "license_type": "no_license", "max_line_length": 142, "num_lines": 243, "path": "/A-Star/graph_search.py", "repo_name": "jlz293/AI-Puzzle-Project", "src_encoding": "UTF-8", "text": "import math # Required to take the absolute value in the manhattan sum calculation\nimport heapq # Required to take care of the priority list of the frontier\nimport copy # Required to copy and deep copy arrays and 2d-arrays\n\n\nclass Node:\n def __init__(self, puzzle, goal, gn, path_to_curr, last_move=None):\n\n # Initializing all the relevant attributes\n self.puzzle = puzzle\n self.path_to_curr = path_to_curr\n self.gn = gn\n\n self.hn = manhattan_distance(self.puzzle, goal)\n self.fn = self.gn + self.hn\n\n if last_move:\n self.last_move = last_move\n else:\n self.last_move = \"\"\n\n # This returns a string representation of the ordered moves that occurred to get to the current node\n def string_path_moves(self):\n path = \"\"\n for node in self.path_to_curr:\n path += node.last_move + \" \"\n path += self.last_move\n return path\n\n # This returns a string representation of the f(n) values along the path to get to the current node\n def string_path_fn_values(self):\n path = \"\"\n for node in self.path_to_curr:\n path += str(int(node.fn)) + \" \"\n path += str(int(self.fn))\n return path\n\n # This is required to be able to compare nodes to the goal and previously expanded nodes\n def __eq__(self, other):\n my_string = \"\"\n other_string = \"\"\n for i in range(len(self.puzzle)):\n for j in range(len(self.puzzle[i])):\n my_string += self.puzzle[i][j]\n for k in range(len(other.puzzle)):\n for m in range(len(other.puzzle[k])):\n other_string += other.puzzle[k][m]\n\n return my_string == other_string\n\n # These operators are necessary to utilize the heap.\n # Not entirely sure about the implementation of heapq so I defined both\n def __lt__(self, other):\n return self.fn < other.fn\n\n def __gt__(self, other):\n return self.fn > other.fn\n\n # This was mostly for debugging, made it easier to be able to print the node\n def __str__(self):\n string = string_puzzle(self.puzzle)\n path = \"\"\n for node in self.path_to_curr:\n path += node.last_move\n path += self.last_move\n string += \"\\nF(n) = \" + str(self.fn) + \"\\nG(n) = \" + str(self.gn) + \"\\nH(n) = \" + str(self.hn) + \"\\nPath to Curr: \" + str(path) + \"\\n\"\n return string\n\n\n# Returns a list of all possible moves and the swap that would consequently occur in the array\ndef all_children_list(node):\n puzzle = node.puzzle\n zero_i, zero_j = index_at_value(puzzle, \"0\")\n children = []\n\n # Every move has a pair to be swapped: [Move, swap_pos[i][j], zero_pos[i][j]]\n moves = [\n\n [\"L\", [zero_i, zero_j - 1], [zero_i, zero_j]],\n [\"R\", [zero_i, zero_j + 1], [zero_i, zero_j]],\n [\"U\", [zero_i - 1, zero_j], [zero_i, zero_j]],\n [\"D\", [zero_i + 1, zero_j], [zero_i, zero_j]],\n ]\n\n # Add the moves that are not out of bounds to the return list of possible moves\n\n # Left\n if zero_j != 0:\n children.append(moves[0])\n\n # Right\n if zero_j != 3:\n children.append(moves[1])\n\n # Up\n if zero_i != 0:\n children.append(moves[2])\n\n # Down\n if zero_i != 3:\n children.append(moves[3])\n\n return 
children\n\n\n# Parses lines from the input file into an array\ndef lines_to_2d_array(lines):\n for i in range(len(lines)):\n lines[i] = lines[i].strip(\"\\n\").split(\" \")\n\n return lines\n\n# Returns a string of a 2d-array\ndef string_puzzle(puzzle):\n string = \"\"\n for row in puzzle:\n for num in row:\n string += num\n string += \" \"\n string += \"\\n\"\n return(string)\n\n\n# Calculates the manhattan distance between the root and goal\ndef manhattan_distance(root, goal):\n answer = 0\n for i in range(0, 4, 1):\n for j in range(0, 4, 1):\n root_ij = root[i][j]\n i_root = i\n j_root = j\n\n index_set = index_at_value(goal, root_ij)\n\n i_goal = index_set[0]\n j_goal = index_set[1]\n\n answer += (math.fabs(i_goal - i_root) + math.fabs(j_goal - j_root))\n\n return answer\n\n\n# Finds the 2d-array index of a given val in a given array, useful primarily for the manhattan distance calculation\ndef index_at_value(puzzle, val):\n\n for i in range(0,4):\n for j in range(0,4):\n if puzzle[i][j] == val:\n return [i, j]\n print(\"Input Error: No 0 in puzzle, no movement possible.\")\n raise\n\n\n# Parses the input file and initializes the root and goal node\ndef input(file_name):\n file = open(file_name, \"r\")\n list_of_lines = file.readlines()\n\n puzzle_lines = list_of_lines[0:4]\n goal_lines = list_of_lines[5:9]\n root_array = lines_to_2d_array(puzzle_lines)\n goal_array = lines_to_2d_array(goal_lines)\n\n root_node = Node(root_array, goal_array, 0, [])\n goal_node = Node(goal_array, goal_array, -1, None, None)\n file.close()\n return [root_node, goal_node]\n\n\n# Main algorithm\ndef a_star(root, goal):\n\n # Setup\n curr_node = root\n expanded_nodes = [root]\n frontier = []\n heapq.heapify(frontier)\n\n # Do\n while True:\n\n # Setup to generate children\n child_moves_lst = all_children_list(curr_node)\n children = []\n\n distance_to_child = curr_node.gn + 1\n\n path_to_child = copy.copy(curr_node.path_to_curr)\n path_to_child.append(curr_node)\n\n # Generate all possible children:\n for child_move in child_moves_lst:\n move = child_move[0]\n\n # Swap the 0 with the relevant tile according to the corresponding move direction\n child_puzzle = copy.deepcopy(curr_node.puzzle)\n zero_swap_partner = child_puzzle[child_move[1][0]][child_move[1][1]]\n child_puzzle[child_move[2][0]][child_move[2][1]] = zero_swap_partner\n child_puzzle[child_move[1][0]][child_move[1][1]] = \"0\"\n\n # Generate the new node\n new_node = Node(child_puzzle, goal.puzzle, distance_to_child, path_to_child, move)\n children.append(new_node)\n\n # Check if children have previously been generated and add to expanded_nodes and frontier:\n for node in children:\n if node not in expanded_nodes:\n expanded_nodes.append(node)\n frontier.append(node)\n if node == goal:\n return node, expanded_nodes\n\n # Find min_node, remove from frontier, and make curr_node\n heapq.heapify(frontier)\n min_node = heapq.heappop(frontier)\n curr_node = min_node\n\n\n# Structures and creates the output file\ndef output(node, expanded, root, goal, input_file):\n output_file = \"Output-\" + input_file\n file = open(output_file, \"w\")\n file.write(string_puzzle(root.puzzle))\n file.write(\"\\n\")\n file.write(string_puzzle(goal.puzzle))\n file.write(\"\\n\")\n file.write(str(node.gn))\n file.write(\"\\n\")\n file.write(str(len(expanded)))\n file.write(\"\\n\")\n file.write(node.string_path_moves())\n file.write(\"\\n\")\n file.write(node.string_path_fn_values())\n file.close()\n\n\nif __name__ == \"__main__\":\n\n input_file = \"Input2.txt\"\n\n 
root, goal = input(input_file)\n winner_node, list_of_nodes = a_star(root, goal)\n output(winner_node, list_of_nodes, root, goal, input_file)\n\n" }, { "alpha_fraction": 0.7804877758026123, "alphanum_fraction": 0.7874564528465271, "avg_line_length": 56.33333206176758, "blob_id": "7fdd337733c7b57ff967d9331409a0ccd78cbd48", "content_id": "40d34047cc388ffa4cce14f8a39a9cd0741fc823", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 861, "license_type": "no_license", "max_line_length": 371, "num_lines": 15, "path": "/README.md", "repo_name": "jlz293/AI-Puzzle-Project", "src_encoding": "UTF-8", "text": "# AI-Puzzle-Project\nThis repository maintains the python implementations for two puzzle solvers in Artificial Intelligence\n\n# 15-Puzzle Problem Solved by A-Star\n\nThe main functional code for this project is in the file called graph_search.py\n\nThe code runs like any python file, simply run it through the terminal. The input file that is to be operated on can be changed in the code itself. To do this simply go to line 239 in graph_search.py, here the input_file defined. It is currently set to run Input4.txt if you want to change this, simply change the string to the title of the file you would like to input. \n\nThe A* (A-Star) algorithm was used to find the goal sequence.\n\nThe heuristic used is the sum of the manhattan distances (taxicab distance) between the tiles of the root and the goal state puzzle. \n\n\n# Futoshiki Puzzle by Backtracking Algorithm\n\n" }, { "alpha_fraction": 0.507594108581543, "alphanum_fraction": 0.5138481259346008, "avg_line_length": 27.594890594482422, "blob_id": "a62c6ba04d2dcc677d788b734590b79d758d5cb7", "content_id": "856ff5a20aeb21a8cc3d34ee808fbd75c2cf8a82", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7835, "license_type": "no_license", "max_line_length": 128, "num_lines": 274, "path": "/Futoshiki/main.py", "repo_name": "jlz293/AI-Puzzle-Project", "src_encoding": "UTF-8", "text": "import copy\n\nclass Variable:\n def __init__(self, val):\n self.val = val\n self.domain = [1, 2, 3, 4, 5]\n self.greater_than = []\n self.less_than = []\n\n def add_greater_than(self, neighbor):\n if neighbor not in self.greater_than:\n self.greater_than.append(neighbor)\n\n def add_less_than(self, neighbor):\n if neighbor not in self.less_than:\n self.less_than.append(neighbor)\n\n def update_domain(self):\n to_remove = []\n initial_domain = [1, 2, 3, 4, 5]\n for neighbor in self.greater_than:\n for option in self.domain:\n if option <= int(neighbor.val) and neighbor.val != \"0\":\n to_remove.append(option)\n\n for neighbor in self.less_than:\n for opt in self.domain:\n if opt >= int(neighbor.val) and neighbor.val != \"0\":\n to_remove.append(opt)\n\n to_remove = list(set(to_remove))\n for a in to_remove:\n initial_domain.remove(a)\n self.domain = initial_domain\n return\n\n def update_val(self):\n if self.val == '0' and len(self.domain) == 1:\n self.val = str(self.domain[0])\n return True\n return False\n\n def impossible(self):\n if len(self.domain) == 0:\n return True\n else:\n return False\n\n def consistent(self):\n if self.val != '0':\n for opt in self.greater_than:\n if self.val <= opt.val and opt.val != '0':\n return False\n for opt in self.less_than:\n if self.val >= opt.val and opt.val != '0':\n return False\n\n return True\n\n\n\nclass Node:\n def __init__(self, var, hor, vert, copy_flag=False):\n self.variables = var\n self.horizontal = hor\n 
self.vertical = vert\n if not copy_flag:\n self.initialize_horizontals()\n self.initialize_verticals()\n\n def initialize_horizontals(self):\n for i in range(len(self.horizontal)):\n for j in range(len(self.horizontal[i])):\n\n left_var = self.variables[i][j]\n right_var = self.variables[i][j+1]\n\n if self.horizontal[i][j] == '<':\n left_var.add_less_than(right_var)\n right_var.add_greater_than(left_var)\n elif self.horizontal[i][j] == '>':\n left_var.add_greater_than(right_var)\n right_var.add_less_than(left_var)\n\n return\n\n\n def initialize_verticals(self):\n for i in range(len(self.vertical)):\n for j in range(len(self.vertical[i])):\n\n up_var = self.variables[i][j]\n down_var = self.variables[i+1][j]\n\n if self.vertical[i][j] == '^':\n up_var.add_less_than(down_var)\n down_var.add_greater_than(up_var)\n\n elif self.vertical[i][j] == 'v':\n up_var.add_greater_than(down_var)\n down_var.add_less_than(up_var)\n\n return\n\n def forward_check(self):\n continue_flag = False\n for i in range(len(self.variables)):\n for j in range(len(self.variables[i])):\n self.variables[i][j].update_domain()\n if self.variables[i][j].update_val():\n continue_flag = True\n if self.variables[i][j].impossible():\n raise Exception(\"Puzzle Can not be Solved. IMPOSSIBLE\")\n\n return continue_flag\n\n\n def forward(self):\n flag = True\n while flag:\n flag = self.forward_check()\n self.initialize_verticals()\n self.initialize_horizontals()\n if self.done():\n return True\n return False\n\n\n def done(self):\n answer = True\n for i in range(len(self.variables)):\n for j in range(len(self.variables[i])):\n if self.variables[i][j].val == '0':\n answer = False\n return answer\n\n def select_unassigned_variable(self):\n i_index = 0\n j_index = 0\n first_zero_flag = True\n mult_index = []\n for i in range(len(self.variables)):\n for j in range(len(self.variables[i])):\n if self.variables[i][j].val == '0':\n if first_zero_flag:\n first_zero_flag = False\n i_index = i\n j_index = j\n mult_index.append([i, j])\n elif len(self.variables[i_index][j_index].domain) > len(self.variables[i][j].domain):\n mult_index = []\n mult_index.append([i, j])\n i_index = i\n j_index = j\n elif len(self.variables[i_index][j_index].domain) == len(self.variables[i][j].domain):\n mult_index.append([i, j])\n\n if len(mult_index) > 1:\n max_neighbors = 0\n max_index = mult_index[0]\n for pair in mult_index:\n neighbors = len(self.variables[pair[0]][pair[1]].less_than) + len(self.variables[pair[0]][pair[1]].greater_than)\n if max_neighbors < neighbors:\n max_neighbors = neighbors\n max_index = pair\n return self.variables[max_index[0]][max_index[1]]\n else:\n return self.variables[i_index][j_index]\n\n\n def impossible(self):\n answer = False\n for row in self.variables:\n for var in row:\n if var.impossible():\n answer = True\n return answer\n\n\n def update_domains(self):\n for row in self.variables:\n for var in row:\n var.update_domain()\n\n return\n\n def consistent(self):\n for row in self.variables:\n for var in row:\n if not var.consistent():\n return False\n return True\n\n def __repr__(self):\n string = \"\"\n for row in self.variables:\n for var in row:\n string += str(var.val)\n string += \" \"\n string += \"\\n\"\n return string\n\n\n\n\ndef initialize_variables(variables):\n\n for i in range(len(variables)):\n for j in range(len(variables[i])):\n obj = Variable(variables[i][j])\n variables[i][j] = obj\n return variables\n\n\n\n\n\n\n# Parses lines from the input file into an array\ndef lines_to_2d_array(lines):\n for i in 
range(len(lines)):\n lines[i] = lines[i].strip(\"\\n\").strip(\"\\r\").strip(\" \").split(\" \")\n\n return lines\n\n\n# Parses the input file and initializes the root and goal node\ndef input(file_name):\n file = open(file_name, \"r\")\n list_of_lines = file.readlines()\n\n variables = lines_to_2d_array(list_of_lines[0:5])\n variables = initialize_variables(variables)\n horizontal = lines_to_2d_array(list_of_lines[6:11])\n vertical = lines_to_2d_array(list_of_lines[12:16])\n file.close()\n\n return Node(variables, horizontal, vertical)\n\n\n\n\n\ndef backtracking_search(node):\n if not node.consistent():\n return False\n if node.done():\n return node\n\n curr_var = node.select_unassigned_variable()\n curr_domain = curr_var.domain\n\n for value in curr_domain:\n curr_var.val = str(value)\n node.update_domains()\n # print(node)\n result = backtracking_search(node)\n if result:\n return result\n else:\n curr_var.val = \"0\"\n node.update_domains()\n\n return False\n\nif __name__ == \"__main__\":\n\n input_file = \"Input3.txt\"\n node = input(input_file)\n node.forward()\n\n\n print(node)\n print(backtracking_search(node))\n" } ]
3
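The jlz293/AI-Puzzle-Project record above describes an A* solver whose heuristic is the sum of Manhattan (taxicab) distances between each tile's current position and its goal position. Below is a minimal self-contained sketch of that heuristic, assuming the same 4x4 grid-of-strings representation with "0" as the blank; the start/goal states and the function name manhattan_sum are made up for illustration and are not taken from the repository.

# Sketch of the Manhattan-distance heuristic used by the A* 15-puzzle solver above.
def manhattan_sum(state, goal):
    # Pre-compute where every tile (including the blank "0") sits in the goal state.
    goal_pos = {goal[i][j]: (i, j) for i in range(4) for j in range(4)}
    total = 0
    for i in range(4):
        for j in range(4):
            gi, gj = goal_pos[state[i][j]]
            total += abs(gi - i) + abs(gj - j)
    return total

if __name__ == "__main__":
    goal = [["1", "2", "3", "4"],
            ["5", "6", "7", "8"],
            ["9", "10", "11", "12"],
            ["13", "14", "15", "0"]]
    start = [["1", "2", "3", "4"],
             ["5", "6", "7", "8"],
             ["9", "10", "15", "11"],
             ["13", "14", "0", "12"]]
    # Prints 4: tiles 15, 11, 12 and the blank are each one move from home
    # (like the repository's version, this counts the blank tile as well).
    print(manhattan_sum(start, goal))
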
375743506/test
https://github.com/375743506/test
d03111bf9feb42346368a329b66d2a69527b9f09
0eefde84d8e4316e2d3fe1656411f946414bc41b
bb9212cc7b43b371765452ba07a08d9d5f68552d
refs/heads/main
2023-02-03T04:55:13.685812
2020-12-22T12:09:36
2020-12-22T12:09:36
322,481,089
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.40909090638160706, "alphanum_fraction": 0.40909090638160706, "avg_line_length": 21, "blob_id": "587d6771a7dbf83bb8acd71b7ae73719bb647324", "content_id": "95bb2fed5d611d809473da0ddf346bd119401a06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46, "license_type": "no_license", "max_line_length": 21, "num_lines": 1, "path": "/t1.py", "repo_name": "375743506/test", "src_encoding": "UTF-8", "text": "print(\"袁航最帅!!!!!!!!\")\n" }, { "alpha_fraction": 0.761904776096344, "alphanum_fraction": 0.761904776096344, "avg_line_length": 6, "blob_id": "3421235c864c627725f6d2fbcde7acfbbad9c2c3", "content_id": "bce62cfde4cc0effee5d34820f21a61fbc2568fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 29, "license_type": "no_license", "max_line_length": 8, "num_lines": 3, "path": "/README.md", "repo_name": "375743506/test", "src_encoding": "UTF-8", "text": "# test\n项目描述\ngogogogo\n" } ]
2
Carlosted/TicTacToe
https://github.com/Carlosted/TicTacToe
3481e1b368925d278eaa3d3db679c44a124f6aea
ed6619ddde6506fe695a1ac060c3b9fc04422f28
fc17b49546096eafcfdabab0b2fbe55549be3d47
refs/heads/master
2023-02-13T14:28:36.716152
2021-01-07T02:00:46
2021-01-07T02:00:46
326,092,198
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7504726052284241, "avg_line_length": 28.44444465637207, "blob_id": "b2b7da9a3c7d81ee5b21824aef0e6f080f61c9c1", "content_id": "f6b508adf2ab5d1b786cbb4dc23f34d4aebcdc21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 529, "license_type": "no_license", "max_line_length": 84, "num_lines": 18, "path": "/README.md", "repo_name": "Carlosted/TicTacToe", "src_encoding": "UTF-8", "text": "# TicTacToe\n\nThis is me making a tictactoe game and learning git\n\n## Objectives\n1. make a simple 2 player TicTacToe game\n2. add a bot functionality\n3. you could switch between 2 player and bot mode\n4. add MinMax algorithm to the bot (maybe make the algorithm in c++???)\n\n## Fixes or improvements\n- Putting every graphic element in show function\n- Making order into the messy code\n- Fixing the grid cordinates system so I don't have to reverse the tuples (maybe???)\n\n## Next step\n- making the bot actually do something\n- **Fixes**" }, { "alpha_fraction": 0.39584389328956604, "alphanum_fraction": 0.4447541832923889, "avg_line_length": 28.893939971923828, "blob_id": "d00af68555b5e26441dd58dace33ab4fa8427004", "content_id": "7d367d294a1eb234167c46ffe3f64e1ce4388eb0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3946, "license_type": "no_license", "max_line_length": 74, "num_lines": 132, "path": "/main.py", "repo_name": "Carlosted/TicTacToe", "src_encoding": "UTF-8", "text": "import pygame\nimport numpy as np\n\nif __name__ == \"__main__\":\n pygame.init()\n\n # Loading images\n appIcon = pygame.image.load(\"Images/appIcon.png\")\n rs = (150, 150)\n xImage = pygame.transform.scale(pygame.image.load(\"Images/X.png\"), rs)\n oImage = pygame.transform.scale(pygame.image.load(\"Images/O.png\"), rs)\n\n # Global variables\n dimensions = (600, 600)\n colorWHITE = (255, 255, 255)\n run = True\n player = 1\n board = np.zeros((3, 3), int)\n isBot = False\n\n # Create screen\n screen = pygame.display.set_mode(dimensions)\n\n # Title and icon\n pygame.display.set_caption(\"TicTacToe\")\n pygame.display.set_icon(appIcon)\n\n # Initial screen\n def resetGame():\n # White background and lines\n screen.fill(colorWHITE)\n pygame.draw.line(screen, (51), (0, 200), (600, 200))\n pygame.draw.line(screen, (51), (0, 400), (600, 400))\n pygame.draw.line(screen, (51), (200, 0), (200, 600))\n pygame.draw.line(screen, (51), (400, 0), (400, 600))\n\n def winCheck():\n # Checking for diagonals\n if (\n board[0][0] != 0\n and board[0][0] == board[1][1]\n and board[1][1] == board[2][2]\n ):\n return ((0, 0), (2, 2))\n if (\n board[0][2] != 0\n and board[0][2] == board[1][1]\n and board[1][1] == board[2][0]\n ):\n return ((0, 2), (2, 0))\n # Checking for rows and columns\n for i in range(3):\n if (\n board[i][0] != 0\n and board[i][0] == board[i][1]\n and board[i][1] == board[i][2]\n ):\n return ((i, 0), (i, 2))\n if (\n board[0][i] != 0\n and board[0][i] == board[1][i]\n and board[1][i] == board[2][i]\n ):\n return ((0, i), (2, i))\n for i in range(3):\n for j in range(3):\n # Checking for not tie\n if board[i][j] == 0:\n return 0\n return None\n\n def endGame(cords):\n # Reversing them because I fucked up\n cord0 = cords[0][::-1]\n cord1 = cords[1][::-1]\n if cords == 0:\n print(\"Tie\")\n else:\n pygame.draw.line(\n screen,\n (200, 0, 40),\n [s * 200 + 100 for s in cord0],\n [f * 200 + 100 for f in cord1],\n 12,\n )\n\n # Initial update\n resetGame()\n 
# Game loop\n while run:\n # Input\n click = None\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n click = np.floor(np.divide(pygame.mouse.get_pos(), 200))\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_TAB:\n isBot = not isBot\n resetGame()\n player = 1\n board = np.zeros((3, 3), int)\n\n # Update\n # Check for win or tie\n end = winCheck()\n if end:\n endGame(end)\n # Bot turn\n if isBot and player == 2:\n print(\"Bot move\")\n player = 1\n\n if (not isBot or player == 1) and isinstance(click, np.ndarray):\n # updating board and player\n if board[int(click[1])][int(click[0])] == 0:\n board[int(click[1])][int(click[0])] = player\n if player == 1:\n player = 2\n else:\n player = 1\n\n # Show\n for i in range(3):\n for j in range(3):\n if board[j][i] == 1:\n screen.blit(xImage, (i * 200 + 25, j * 200 + 25))\n elif board[j][i] == 2:\n screen.blit(oImage, (i * 200 + 25, j * 200 + 25))\n\n pygame.display.update()\n" } ]
2
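The Carlosted/TicTacToe record above lists a MinMax bot as a planned next step but does not implement it. The sketch below is a generic, illustrative minimax for a 3x3 board using the project's 0 = empty, 1 = X, 2 = O cell encoding; it is not code from the repository, and the flat 9-cell board plus the function names are assumptions made for the example.

# Illustrative-only minimax sketch for the planned TicTacToe bot.
# Board: flat list of 9 cells, 0 = empty, 1 = X, 2 = O.
LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
         (0, 3, 6), (1, 4, 7), (2, 5, 8),
         (0, 4, 8), (2, 4, 6)]

def winner(board):
    for a, b, c in LINES:
        if board[a] != 0 and board[a] == board[b] == board[c]:
            return board[a]
    return 0

def minimax(board, player, bot):
    # Returns (score, move) from the bot's point of view: +1 win, 0 draw, -1 loss.
    win = winner(board)
    if win == bot:
        return 1, None
    if win != 0:
        return -1, None
    moves = [i for i, cell in enumerate(board) if cell == 0]
    if not moves:
        return 0, None            # draw
    best_score, best_move = None, None
    for move in moves:
        board[move] = player
        score, _ = minimax(board, 2 if player == 1 else 1, bot)
        board[move] = 0
        if best_score is None:
            best_score, best_move = score, move
        elif player == bot and score > best_score:
            best_score, best_move = score, move
        elif player != bot and score < best_score:
            best_score, best_move = score, move
    return best_score, best_move

if __name__ == "__main__":
    # X (1) to move and can complete the top row; here the bot plays X.
    board = [1, 1, 0,
             2, 2, 0,
             0, 0, 0]
    print(minimax(board, player=1, bot=1))   # -> (1, 2): winning move at index 2

Note that this plain version does not prefer quicker wins over slower ones; a depth term in the score would be needed for that, but it never loses from a winnable or drawable position.
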
WilseErik/lifeJacket
https://github.com/WilseErik/lifeJacket
4c413fad7af784db42d644aa46575dd650a72cf7
dcd0c70c537d97bc467f481fd7c5d34d319de520
aabf8929ba530331546ee5fe704f8f334d4f8155
refs/heads/master
2020-04-11T22:18:03.451636
2019-04-18T18:45:26
2019-04-18T18:45:26
162,131,686
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5299286246299744, "alphanum_fraction": 0.5403624176979065, "avg_line_length": 24.640844345092773, "blob_id": "ed4c4c3355d82e62206fe574f76e43e0ccf4dcc1", "content_id": "5b17fdac0168815e1c6a2728dfa22b397f8448f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3642, "license_type": "no_license", "max_line_length": 80, "num_lines": 142, "path": "/life_jacket.X/inc/gps/jf2_uart.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef JF2_UART_H\n#define\tJF2_UART_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n\n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n/**\n * @brief Initializes the UART module\n * @param void\n * @return void\n */\nvoid jf2_uart_init();\n\n/**\n * @brief Writes a byte over the uart interface.\n * @param data - The data to send.\n * @return void\n */\nvoid jf2_uart_write(uint8_t data);\n\n/**\n * @brief Write a string over the uart interface.\n * @param data - The null terminated data to send.\n * @return void\n */\nvoid jf2_uart_write_string(const char* data);\n\n/**\n * @brief Write a string over the uart interface.\n * @brief nbr_of_bytes - The number of bytes to send.\n * @param data - The null terminated data to send.\n * @return void\n */\nvoid jf2_uart_write_array(uint16_t nbr_of_bytes, const uint8_t* data);\n\n/**\n * @brief Checks if the write buffer is empty.\n * @return True if the write buffer is empty.\n */\nbool jf2_uart_is_write_buffer_empty(void);\n\n/**\n * @brief Gets a byte from the receive buffer.\n * @param index - The index of the byte to get.\n * @return The byte in the receive buffer at the specified index.\n */\nuint8_t jf2_uart_get(uint16_t index);\n\n/**\n * @brief Gets the size (in number of elements) of the receive buffer.\n * @param void\n * @return The size of the reveive buffer.\n */\nuint16_t jf2_uart_get_receive_buffer_size(void);\n\n/**\n * @brief Checks if the receive buffer is empty.\n * @param void\n * @return True if the receive buffer is empty, false otherwise.\n */\nbool jf2_uart_is_receive_buffer_empty(void);\n\n/**\n * @brief Clears the receive buffer.\n * @param void\n * @return void\n */\nvoid jf2_uart_clear_receive_buffer(void);\n\n/**\n * @brief Enables/disables the characters received from the GPS receiver to\n * be echoed onto the debug uart channel.\n */\nvoid jf2_uart_enable_debug_uart_echo(bool enable);\n\n/**\n * @brief Enables the UART receive interrupt.\n * @details This interrupt will affect the transmit and receive buffer.\n */\nstatic inline 
void jf2_uart_enable_rx_interrupt()\n{\n IEC0bits.U1RXIE = 1;\n}\n\n/**\n * @brief Disables the UART receive interrupt.\n * @details This interrupt will affect the transmit and receive buffer.\n */\nstatic inline void jf2_uart_disable_rx_interrupt()\n{\n IEC0bits.U1RXIE = 0;\n}\n\n/**\n * @brief Enables the UART transmit interrupt.\n * @details This interrupt will affect the transmit buffer.\n */\nstatic inline void jf2_uart_enable_tx_interrupt()\n{\n IEC0bits.U1TXIE = 1;\n}\n\n/**\n * @brief Disables the UART transmit interrupt.\n * @details This interrupt will affect the transmit buffer.\n */\nstatic inline void jf2_uart_disable_tx_interrupt()\n{\n IEC0bits.U1TXIE = 0;\n}\n\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* JF2_UART_H */\n\n" }, { "alpha_fraction": 0.4034665524959564, "alphanum_fraction": 0.40683677792549133, "avg_line_length": 25.615385055541992, "blob_id": "2f642a19649c4b0bae4badcc1730f43f0776a646", "content_id": "8764210554b052d99ac1d353a21f1dff63af247c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2077, "license_type": "no_license", "max_line_length": 80, "num_lines": 78, "path": "/life_jacket.X/inc/gps/nmea.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef NMEA_H\n#define NMEA_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n\ntypedef struct nmea_coordinates_info_t\n{\n bool latitude_north;\n uint16_t latitude_deg;\n float latitude_minutes;\n \n bool longitude_east;\n uint16_t longitude_deg;\n float longitude_minutes;\n \n uint8_t time_of_fix_hours;\n uint8_t time_of_fix_minutes;\n uint8_t time_of_fix_seconds;\n} nmea_coordinates_info_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Handles a NMEA output message.\n*/\nvoid nmea_handle_message(char * message);\n\n/**\n @brief Resets the on lock event flag.\n */\nvoid nmea_reset_on_lock_event(void);\n\n/**\n @brief Checks if the on lock event flag is set.\n @return On lock event flag.\n */\nbool nmea_check_on_lock_event(void);\n\n/**\n @brief Gets the most recent current coordinates.\n @param coordinates - Where to store the coordinates.\n*/\nvoid nmea_get_coordinates(nmea_coordinates_info_t * coordinates);\n\n/**\n * @brief Prints the current status on the debug UART.\n */\nvoid nmea_print_status(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* NMEA_H */\n\n" }, { "alpha_fraction": 0.4431030750274658, "alphanum_fraction": 0.4437737464904785, "avg_line_length": 37.517242431640625, "blob_id": "f39c7f380e6188b119035a11246909249e41d5a4", "content_id": 
"c3fb8e4f89148a4e70e4c3a24e4b9820eed69a6c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4474, "license_type": "no_license", "max_line_length": 111, "num_lines": 116, "path": "/life_jacket.X/terminal_doc_gen.py", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "# This script generates documentation which can be seen in the dsp terminal.\n\nclass Command_doc:\n tag = \"\"\n cmd = \"\"\n doc = \"\"\n\n # @brief Creates a command documentation for one command\n # @param tag - The name of the C string constant\n # @param cmd - The command to create a Command_doc for\n # @param doc - The documentation text of the command\n def __init__(self, command_tag = \"\", command = \"\", documentation = \"\"):\n self.tag = command_tag\n self.cmd = command\n self.doc = documentation\n\nclass Cmd_parser:\n commands = []\n \n def parse_command_doc(self, filename = \"src\\\\uart\\\\terminal.c\"):\n start_tag = \"/*§\"\n end_tag = \"*/\"\n \n with open(filename) as src_file:\n lines = src_file.readlines()\n\n parsing_doc = False\n parse_cmd = False\n doc = \"\"\n tag = \"\"\n cmd = \"\"\n for line in lines:\n if parse_cmd:\n parse_cmd = False\n end_of_tag = line.index('[')\n tag = line[len(\"static const char \"):end_of_tag]\n start_of_cmd = line.index('\"') + 1\n end_of_cmd = start_of_cmd + 1 + line[start_of_cmd + 1:].index('\"')\n cmd = line[start_of_cmd:end_of_cmd]\n self.commands.append(Command_doc(tag, cmd, doc))\n\n if start_tag in line:\n parsing_doc = True\n doc = \"\"\n tag = \"\"\n cmd = \"\"\n elif parsing_doc and end_tag in line:\n parsing_doc = False\n parse_cmd = True\n elif parsing_doc:\n doc += line.strip() + \"\\\\n\\\\r\\\\t\"\n\n def create_help_function(self):\n with open(\"src/uart/terminal_help.c\", 'w') as f:\n print(\"/*\", file=f)\n print(\"This file is an auto generated file.\", file=f)\n print(\"Do not modify its contents manually!\", file=f)\n print(\"*/\", file=f)\n print(\"#include <string.h>\", file=f)\n print(\"#include <stddef.h>\", file=f)\n print(\"#include \\\"hal/uart.h\\\"\", file=f)\n\n print(\"void terminal_help(char* in)\", file=f)\n print(\"{\", file=f)\n\n first = True\n for cmd in self.commands:\n if first:\n first = False\n print(\" if (NULL != strstr(in, \\\"\" + cmd.cmd + \"\\\"))\", file=f)\n else:\n print(\" else if (NULL != strstr(in, \\\"\" + cmd.cmd + \"\\\"))\", file=f)\n print(\" {\", file=f)\n print(\" uart_write_string(\\\"\\\\t\" + cmd.doc + \"\\\\n\\\\r\\\");\", file=f)\n print(\" while (!uart_is_write_buffer_empty()){;}\", file=f)\n print(\" }\", file=f)\n\n print(\" else\", file=f)\n print(\" {\", file=f)\n\n commands_list = []\n for cmd in self.commands:\n commands_list.append(cmd.cmd + \"\\\\n\\\\r\\\\t\")\n commands_list.sort()\n\n commands_string = '\"\\\\t'\n for line in commands_list:\n commands_string += line\n commands_string += '\"'\n\n print(\" uart_write_string(\\\"\\\\tType \\\\\\\"help <command>\\\\\\\" for more info\\\\n\\\\r\\\");\", file=f)\n print(\" while (!uart_is_write_buffer_empty()){;}\", file=f)\n print(\" uart_write_string(\\\"\\\\tAvailible commands:\\\\n\\\\r\\\");\", file=f)\n print(\" while (!uart_is_write_buffer_empty()){;}\", file=f)\n print(\" uart_write_string(\\\"\\\\t------------------------------------\\\\n\\\\r\\\\t\\\");\", file=f)\n print(\" while (!uart_is_write_buffer_empty()){;}\", file=f)\n \n for line in commands_list:\n print(' uart_write_string(\"' + line + '\");', file=f)\n print(\" while 
(!uart_is_write_buffer_empty()){;}\", file=f)\n\n print(\" uart_write_string(\\\"\\\\n\\\\r\\\");\", file=f)\n print(\" }\", file=f)\n print(\"}\", file=f)\n \n \n# ===============================================================================\n# Module test\n# ===============================================================================\n\nif __name__ == \"__main__\":\n print(\"Terminal doc gen started\")\n parser = Cmd_parser()\n parser.parse_command_doc()\n parser.create_help_function()\n print(\"Terminal doc gen complete\")\n \n" }, { "alpha_fraction": 0.6337119340896606, "alphanum_fraction": 0.6482076644897461, "avg_line_length": 73.02434539794922, "blob_id": "9fd179a57b00f4d69c2f40f5c237769cffaf98d7", "content_id": "af6e85bed444fd31a0f81feff5cb27b27bb5e433", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 39529, "license_type": "no_license", "max_line_length": 948, "num_lines": 534, "path": "/life_jacket.X/nbproject/Makefile-default.mk", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#\n# Generated Makefile - do not edit!\n#\n# Edit the Makefile in the project folder instead (../Makefile). Each target\n# has a -pre and a -post target defined where you can add customized code.\n#\n# This makefile implements configuration specific macros and targets.\n\n\n# Include project Makefile\nifeq \"${IGNORE_LOCAL}\" \"TRUE\"\n# do not include local makefile. User is passing all local related variables already\nelse\ninclude Makefile\n# Include makefile containing local settings\nifeq \"$(wildcard nbproject/Makefile-local-default.mk)\" \"nbproject/Makefile-local-default.mk\"\ninclude nbproject/Makefile-local-default.mk\nendif\nendif\n\n# Environment\nMKDIR=gnumkdir -p\nRM=rm -f \nMV=mv \nCP=cp \n\n# Macros\nCND_CONF=default\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\nIMAGE_TYPE=debug\nOUTPUT_SUFFIX=elf\nDEBUGGABLE_SUFFIX=elf\nFINAL_IMAGE=dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}\nelse\nIMAGE_TYPE=production\nOUTPUT_SUFFIX=hex\nDEBUGGABLE_SUFFIX=elf\nFINAL_IMAGE=dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}\nendif\n\n# Object Directory\nOBJECTDIR=build/${CND_CONF}/${IMAGE_TYPE}\n\n# Distribution Directory\nDISTDIR=dist/${CND_CONF}/${IMAGE_TYPE}\n\n# Source Files Quoted if spaced\nSOURCEFILES_QUOTED_IF_SPACED=src/acc/accelerometer.c src/acc/lis2hh12_io.c src/audio/ext_flash.c src/audio/pcm1770.c src/audio/audio.c src/audio/dma.c src/gps/jf2_uart.c src/gps/nmea.c src/gps/nmea_queue.c src/gps/jf2_io.c src/gps/gps.c src/hal/clock.c src/hal/flash.c src/hal/gpio.c src/hal/spi_hal.c src/hal/uart.c src/hal/configuration_bits.c src/lora/rfm95w.c src/lora/rfm95w_io.c src/lora/lora_tx_queue.c src/lora/p2pc_protocol.c src/lora/p2ps_protocol.c src/main/main.c src/uart/terminal.c src/uart/terminal_help.c src/uart/debug_log.c src/status.c\n\n# Object Files Quoted if spaced\nOBJECTFILES_QUOTED_IF_SPACED=${OBJECTDIR}/src/acc/accelerometer.o ${OBJECTDIR}/src/acc/lis2hh12_io.o ${OBJECTDIR}/src/audio/ext_flash.o ${OBJECTDIR}/src/audio/pcm1770.o ${OBJECTDIR}/src/audio/audio.o ${OBJECTDIR}/src/audio/dma.o ${OBJECTDIR}/src/gps/jf2_uart.o ${OBJECTDIR}/src/gps/nmea.o ${OBJECTDIR}/src/gps/nmea_queue.o ${OBJECTDIR}/src/gps/jf2_io.o ${OBJECTDIR}/src/gps/gps.o ${OBJECTDIR}/src/hal/clock.o ${OBJECTDIR}/src/hal/flash.o ${OBJECTDIR}/src/hal/gpio.o ${OBJECTDIR}/src/hal/spi_hal.o ${OBJECTDIR}/src/hal/uart.o ${OBJECTDIR}/src/hal/configuration_bits.o ${OBJECTDIR}/src/lora/rfm95w.o 
${OBJECTDIR}/src/lora/rfm95w_io.o ${OBJECTDIR}/src/lora/lora_tx_queue.o ${OBJECTDIR}/src/lora/p2pc_protocol.o ${OBJECTDIR}/src/lora/p2ps_protocol.o ${OBJECTDIR}/src/main/main.o ${OBJECTDIR}/src/uart/terminal.o ${OBJECTDIR}/src/uart/terminal_help.o ${OBJECTDIR}/src/uart/debug_log.o ${OBJECTDIR}/src/status.o\nPOSSIBLE_DEPFILES=${OBJECTDIR}/src/acc/accelerometer.o.d ${OBJECTDIR}/src/acc/lis2hh12_io.o.d ${OBJECTDIR}/src/audio/ext_flash.o.d ${OBJECTDIR}/src/audio/pcm1770.o.d ${OBJECTDIR}/src/audio/audio.o.d ${OBJECTDIR}/src/audio/dma.o.d ${OBJECTDIR}/src/gps/jf2_uart.o.d ${OBJECTDIR}/src/gps/nmea.o.d ${OBJECTDIR}/src/gps/nmea_queue.o.d ${OBJECTDIR}/src/gps/jf2_io.o.d ${OBJECTDIR}/src/gps/gps.o.d ${OBJECTDIR}/src/hal/clock.o.d ${OBJECTDIR}/src/hal/flash.o.d ${OBJECTDIR}/src/hal/gpio.o.d ${OBJECTDIR}/src/hal/spi_hal.o.d ${OBJECTDIR}/src/hal/uart.o.d ${OBJECTDIR}/src/hal/configuration_bits.o.d ${OBJECTDIR}/src/lora/rfm95w.o.d ${OBJECTDIR}/src/lora/rfm95w_io.o.d ${OBJECTDIR}/src/lora/lora_tx_queue.o.d ${OBJECTDIR}/src/lora/p2pc_protocol.o.d ${OBJECTDIR}/src/lora/p2ps_protocol.o.d ${OBJECTDIR}/src/main/main.o.d ${OBJECTDIR}/src/uart/terminal.o.d ${OBJECTDIR}/src/uart/terminal_help.o.d ${OBJECTDIR}/src/uart/debug_log.o.d ${OBJECTDIR}/src/status.o.d\n\n# Object Files\nOBJECTFILES=${OBJECTDIR}/src/acc/accelerometer.o ${OBJECTDIR}/src/acc/lis2hh12_io.o ${OBJECTDIR}/src/audio/ext_flash.o ${OBJECTDIR}/src/audio/pcm1770.o ${OBJECTDIR}/src/audio/audio.o ${OBJECTDIR}/src/audio/dma.o ${OBJECTDIR}/src/gps/jf2_uart.o ${OBJECTDIR}/src/gps/nmea.o ${OBJECTDIR}/src/gps/nmea_queue.o ${OBJECTDIR}/src/gps/jf2_io.o ${OBJECTDIR}/src/gps/gps.o ${OBJECTDIR}/src/hal/clock.o ${OBJECTDIR}/src/hal/flash.o ${OBJECTDIR}/src/hal/gpio.o ${OBJECTDIR}/src/hal/spi_hal.o ${OBJECTDIR}/src/hal/uart.o ${OBJECTDIR}/src/hal/configuration_bits.o ${OBJECTDIR}/src/lora/rfm95w.o ${OBJECTDIR}/src/lora/rfm95w_io.o ${OBJECTDIR}/src/lora/lora_tx_queue.o ${OBJECTDIR}/src/lora/p2pc_protocol.o ${OBJECTDIR}/src/lora/p2ps_protocol.o ${OBJECTDIR}/src/main/main.o ${OBJECTDIR}/src/uart/terminal.o ${OBJECTDIR}/src/uart/terminal_help.o ${OBJECTDIR}/src/uart/debug_log.o ${OBJECTDIR}/src/status.o\n\n# Source Files\nSOURCEFILES=src/acc/accelerometer.c src/acc/lis2hh12_io.c src/audio/ext_flash.c src/audio/pcm1770.c src/audio/audio.c src/audio/dma.c src/gps/jf2_uart.c src/gps/nmea.c src/gps/nmea_queue.c src/gps/jf2_io.c src/gps/gps.c src/hal/clock.c src/hal/flash.c src/hal/gpio.c src/hal/spi_hal.c src/hal/uart.c src/hal/configuration_bits.c src/lora/rfm95w.c src/lora/rfm95w_io.c src/lora/lora_tx_queue.c src/lora/p2pc_protocol.c src/lora/p2ps_protocol.c src/main/main.c src/uart/terminal.c src/uart/terminal_help.c src/uart/debug_log.c src/status.c\n\n\nCFLAGS=\nASFLAGS=\nLDLIBSOPTIONS=\n\n############# Tool locations ##########################################\n# If you copy a project from one host to another, the path where the #\n# compiler is installed may be different. #\n# If you open this project with MPLAB X in the new host, this #\n# makefile will be regenerated and the paths will be corrected. 
#\n#######################################################################\n# fixDeps replaces a bunch of sed/cat/printf statements that slow down the build\nFIXDEPS=fixDeps\n\n# The following macros may be used in the pre and post step lines\nDevice=PIC24FJ128GA204\nProjectDir=\"C:\\Users\\Erik\\Dropbox\\exjobb\\sw\\working_dir\\lifeJacket\\life_jacket.X\"\nConfName=default\nImagePath=\"dist\\default\\${IMAGE_TYPE}\\life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}\"\nImageDir=\"dist\\default\\${IMAGE_TYPE}\"\nImageName=\"life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}\"\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\nIsDebug=\"true\"\nelse\nIsDebug=\"false\"\nendif\n\n.build-conf: .pre ${BUILD_SUBPROJECTS}\nifneq ($(INFORMATION_MESSAGE), )\n\t@echo $(INFORMATION_MESSAGE)\nendif\n\t${MAKE} -f nbproject/Makefile-default.mk dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}\n\t@echo \"--------------------------------------\"\n\t@echo \"User defined post-build step: [python -E increment_build_number.py]\"\n\t@python -E increment_build_number.py\n\t@echo \"--------------------------------------\"\n\nMP_PROCESSOR_OPTION=24FJ128GA204\nMP_LINKER_FILE_OPTION=,--script=p24FJ128GA204.gld\n# ------------------------------------------------------------------------------------\n# Rules for buildStep: compile\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\n${OBJECTDIR}/src/acc/accelerometer.o: src/acc/accelerometer.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/acc\" \n\t@${RM} ${OBJECTDIR}/src/acc/accelerometer.o.d \n\t@${RM} ${OBJECTDIR}/src/acc/accelerometer.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/acc/accelerometer.c -o ${OBJECTDIR}/src/acc/accelerometer.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/acc/accelerometer.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/acc/accelerometer.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/acc/lis2hh12_io.o: src/acc/lis2hh12_io.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/acc\" \n\t@${RM} ${OBJECTDIR}/src/acc/lis2hh12_io.o.d \n\t@${RM} ${OBJECTDIR}/src/acc/lis2hh12_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/acc/lis2hh12_io.c -o ${OBJECTDIR}/src/acc/lis2hh12_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/acc/lis2hh12_io.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/acc/lis2hh12_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/ext_flash.o: src/audio/ext_flash.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/ext_flash.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/ext_flash.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/ext_flash.c -o ${OBJECTDIR}/src/audio/ext_flash.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/ext_flash.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/ext_flash.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/pcm1770.o: src/audio/pcm1770.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/pcm1770.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/pcm1770.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/pcm1770.c -o ${OBJECTDIR}/src/audio/pcm1770.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF 
\"${OBJECTDIR}/src/audio/pcm1770.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/pcm1770.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/audio.o: src/audio/audio.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/audio.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/audio.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/audio.c -o ${OBJECTDIR}/src/audio/audio.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/audio.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/audio.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/dma.o: src/audio/dma.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/dma.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/dma.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/dma.c -o ${OBJECTDIR}/src/audio/dma.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/dma.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/dma.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/jf2_uart.o: src/gps/jf2_uart.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_uart.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_uart.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/jf2_uart.c -o ${OBJECTDIR}/src/gps/jf2_uart.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/jf2_uart.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/jf2_uart.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/nmea.o: src/gps/nmea.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/nmea.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/nmea.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/nmea.c -o ${OBJECTDIR}/src/gps/nmea.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/nmea.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/nmea.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/nmea_queue.o: src/gps/nmea_queue.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/nmea_queue.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/nmea_queue.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/nmea_queue.c -o ${OBJECTDIR}/src/gps/nmea_queue.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/nmea_queue.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/nmea_queue.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/jf2_io.o: src/gps/jf2_io.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_io.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/jf2_io.c -o ${OBJECTDIR}/src/gps/jf2_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/jf2_io.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall 
-msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/jf2_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/gps.o: src/gps/gps.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/gps.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/gps.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/gps.c -o ${OBJECTDIR}/src/gps/gps.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/gps.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/gps.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/clock.o: src/hal/clock.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/clock.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/clock.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/clock.c -o ${OBJECTDIR}/src/hal/clock.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/clock.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/clock.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/flash.o: src/hal/flash.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/flash.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/flash.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/flash.c -o ${OBJECTDIR}/src/hal/flash.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/flash.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/flash.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/gpio.o: src/hal/gpio.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/gpio.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/gpio.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/gpio.c -o ${OBJECTDIR}/src/hal/gpio.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/gpio.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/gpio.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/spi_hal.o: src/hal/spi_hal.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/spi_hal.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/spi_hal.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/spi_hal.c -o ${OBJECTDIR}/src/hal/spi_hal.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/spi_hal.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/spi_hal.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/uart.o: src/hal/uart.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/uart.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/uart.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/uart.c -o ${OBJECTDIR}/src/hal/uart.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/uart.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/uart.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/configuration_bits.o: src/hal/configuration_bits.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} 
${OBJECTDIR}/src/hal/configuration_bits.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/configuration_bits.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/configuration_bits.c -o ${OBJECTDIR}/src/hal/configuration_bits.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/configuration_bits.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/configuration_bits.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/rfm95w.o: src/lora/rfm95w.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/rfm95w.c -o ${OBJECTDIR}/src/lora/rfm95w.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/rfm95w.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/rfm95w.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/rfm95w_io.o: src/lora/rfm95w_io.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w_io.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/rfm95w_io.c -o ${OBJECTDIR}/src/lora/rfm95w_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/rfm95w_io.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/rfm95w_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/lora_tx_queue.o: src/lora/lora_tx_queue.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/lora_tx_queue.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/lora_tx_queue.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/lora_tx_queue.c -o ${OBJECTDIR}/src/lora/lora_tx_queue.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/lora_tx_queue.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/lora_tx_queue.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/p2pc_protocol.o: src/lora/p2pc_protocol.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/p2pc_protocol.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/p2pc_protocol.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/p2pc_protocol.c -o ${OBJECTDIR}/src/lora/p2pc_protocol.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/p2pc_protocol.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/p2pc_protocol.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/p2ps_protocol.o: src/lora/p2ps_protocol.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/p2ps_protocol.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/p2ps_protocol.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/p2ps_protocol.c -o ${OBJECTDIR}/src/lora/p2ps_protocol.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/p2ps_protocol.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/p2ps_protocol.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ 
\n\t\n${OBJECTDIR}/src/main/main.o: src/main/main.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/main\" \n\t@${RM} ${OBJECTDIR}/src/main/main.o.d \n\t@${RM} ${OBJECTDIR}/src/main/main.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/main/main.c -o ${OBJECTDIR}/src/main/main.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/main/main.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/main/main.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/terminal.o: src/uart/terminal.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/terminal.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/terminal.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/terminal.c -o ${OBJECTDIR}/src/uart/terminal.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/terminal.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/terminal.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/terminal_help.o: src/uart/terminal_help.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/terminal_help.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/terminal_help.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/terminal_help.c -o ${OBJECTDIR}/src/uart/terminal_help.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/terminal_help.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/terminal_help.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/debug_log.o: src/uart/debug_log.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/debug_log.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/debug_log.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/debug_log.c -o ${OBJECTDIR}/src/uart/debug_log.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/debug_log.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/debug_log.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/status.o: src/status.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src\" \n\t@${RM} ${OBJECTDIR}/src/status.o.d \n\t@${RM} ${OBJECTDIR}/src/status.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/status.c -o ${OBJECTDIR}/src/status.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/status.o.d\" -g -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -mno-eds-warn -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/status.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\nelse\n${OBJECTDIR}/src/acc/accelerometer.o: src/acc/accelerometer.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/acc\" \n\t@${RM} ${OBJECTDIR}/src/acc/accelerometer.o.d \n\t@${RM} ${OBJECTDIR}/src/acc/accelerometer.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/acc/accelerometer.c -o ${OBJECTDIR}/src/acc/accelerometer.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/acc/accelerometer.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/acc/accelerometer.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/acc/lis2hh12_io.o: src/acc/lis2hh12_io.c 
nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/acc\" \n\t@${RM} ${OBJECTDIR}/src/acc/lis2hh12_io.o.d \n\t@${RM} ${OBJECTDIR}/src/acc/lis2hh12_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/acc/lis2hh12_io.c -o ${OBJECTDIR}/src/acc/lis2hh12_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/acc/lis2hh12_io.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/acc/lis2hh12_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/ext_flash.o: src/audio/ext_flash.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/ext_flash.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/ext_flash.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/ext_flash.c -o ${OBJECTDIR}/src/audio/ext_flash.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/ext_flash.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/ext_flash.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/pcm1770.o: src/audio/pcm1770.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/pcm1770.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/pcm1770.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/pcm1770.c -o ${OBJECTDIR}/src/audio/pcm1770.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/pcm1770.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/pcm1770.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/audio.o: src/audio/audio.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/audio.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/audio.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/audio.c -o ${OBJECTDIR}/src/audio/audio.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/audio.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/audio.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/audio/dma.o: src/audio/dma.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/audio\" \n\t@${RM} ${OBJECTDIR}/src/audio/dma.o.d \n\t@${RM} ${OBJECTDIR}/src/audio/dma.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/audio/dma.c -o ${OBJECTDIR}/src/audio/dma.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/audio/dma.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/audio/dma.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/jf2_uart.o: src/gps/jf2_uart.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_uart.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_uart.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/jf2_uart.c -o ${OBJECTDIR}/src/gps/jf2_uart.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/jf2_uart.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/jf2_uart.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/nmea.o: src/gps/nmea.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/nmea.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/nmea.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/nmea.c -o ${OBJECTDIR}/src/gps/nmea.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF 
\"${OBJECTDIR}/src/gps/nmea.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/nmea.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/nmea_queue.o: src/gps/nmea_queue.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/nmea_queue.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/nmea_queue.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/nmea_queue.c -o ${OBJECTDIR}/src/gps/nmea_queue.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/nmea_queue.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/nmea_queue.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/jf2_io.o: src/gps/jf2_io.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_io.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/jf2_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/jf2_io.c -o ${OBJECTDIR}/src/gps/jf2_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/jf2_io.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/jf2_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/gps/gps.o: src/gps/gps.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/gps\" \n\t@${RM} ${OBJECTDIR}/src/gps/gps.o.d \n\t@${RM} ${OBJECTDIR}/src/gps/gps.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/gps/gps.c -o ${OBJECTDIR}/src/gps/gps.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/gps/gps.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/gps/gps.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/clock.o: src/hal/clock.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/clock.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/clock.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/clock.c -o ${OBJECTDIR}/src/hal/clock.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/clock.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/clock.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/flash.o: src/hal/flash.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/flash.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/flash.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/flash.c -o ${OBJECTDIR}/src/hal/flash.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/flash.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/flash.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/gpio.o: src/hal/gpio.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/gpio.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/gpio.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/gpio.c -o ${OBJECTDIR}/src/hal/gpio.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/gpio.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/gpio.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/spi_hal.o: src/hal/spi_hal.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/spi_hal.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/spi_hal.o \n\t${MP_CC} 
$(MP_EXTRA_CC_PRE) src/hal/spi_hal.c -o ${OBJECTDIR}/src/hal/spi_hal.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/spi_hal.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/spi_hal.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/uart.o: src/hal/uart.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/uart.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/uart.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/uart.c -o ${OBJECTDIR}/src/hal/uart.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/uart.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/uart.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/hal/configuration_bits.o: src/hal/configuration_bits.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/hal\" \n\t@${RM} ${OBJECTDIR}/src/hal/configuration_bits.o.d \n\t@${RM} ${OBJECTDIR}/src/hal/configuration_bits.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/hal/configuration_bits.c -o ${OBJECTDIR}/src/hal/configuration_bits.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/hal/configuration_bits.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/hal/configuration_bits.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/rfm95w.o: src/lora/rfm95w.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/rfm95w.c -o ${OBJECTDIR}/src/lora/rfm95w.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/rfm95w.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/rfm95w.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/rfm95w_io.o: src/lora/rfm95w_io.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w_io.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/rfm95w_io.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/rfm95w_io.c -o ${OBJECTDIR}/src/lora/rfm95w_io.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/rfm95w_io.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/rfm95w_io.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/lora_tx_queue.o: src/lora/lora_tx_queue.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/lora_tx_queue.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/lora_tx_queue.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/lora_tx_queue.c -o ${OBJECTDIR}/src/lora/lora_tx_queue.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/lora_tx_queue.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/lora_tx_queue.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/p2pc_protocol.o: src/lora/p2pc_protocol.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/p2pc_protocol.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/p2pc_protocol.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/p2pc_protocol.c -o ${OBJECTDIR}/src/lora/p2pc_protocol.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/p2pc_protocol.o.d\" 
-mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/p2pc_protocol.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/lora/p2ps_protocol.o: src/lora/p2ps_protocol.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/lora\" \n\t@${RM} ${OBJECTDIR}/src/lora/p2ps_protocol.o.d \n\t@${RM} ${OBJECTDIR}/src/lora/p2ps_protocol.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/lora/p2ps_protocol.c -o ${OBJECTDIR}/src/lora/p2ps_protocol.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/lora/p2ps_protocol.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/lora/p2ps_protocol.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/main/main.o: src/main/main.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/main\" \n\t@${RM} ${OBJECTDIR}/src/main/main.o.d \n\t@${RM} ${OBJECTDIR}/src/main/main.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/main/main.c -o ${OBJECTDIR}/src/main/main.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/main/main.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/main/main.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/terminal.o: src/uart/terminal.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/terminal.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/terminal.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/terminal.c -o ${OBJECTDIR}/src/uart/terminal.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/terminal.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/terminal.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/terminal_help.o: src/uart/terminal_help.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/terminal_help.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/terminal_help.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/terminal_help.c -o ${OBJECTDIR}/src/uart/terminal_help.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/terminal_help.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/terminal_help.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/uart/debug_log.o: src/uart/debug_log.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src/uart\" \n\t@${RM} ${OBJECTDIR}/src/uart/debug_log.o.d \n\t@${RM} ${OBJECTDIR}/src/uart/debug_log.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/uart/debug_log.c -o ${OBJECTDIR}/src/uart/debug_log.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/uart/debug_log.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/uart/debug_log.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\n${OBJECTDIR}/src/status.o: src/status.c nbproject/Makefile-${CND_CONF}.mk\n\t@${MKDIR} \"${OBJECTDIR}/src\" \n\t@${RM} ${OBJECTDIR}/src/status.o.d \n\t@${RM} ${OBJECTDIR}/src/status.o \n\t${MP_CC} $(MP_EXTRA_CC_PRE) src/status.c -o ${OBJECTDIR}/src/status.o -c -mcpu=$(MP_PROCESSOR_OPTION) -MMD -MF \"${OBJECTDIR}/src/status.o.d\" -mno-eds-warn -g -omf=elf -O0 -I\"inc\" -msmart-io=1 -Wall -msfr-warn=off\n\t@${FIXDEPS} \"${OBJECTDIR}/src/status.o.d\" $(SILENT) -rsi ${MP_CC_DIR}../ \n\t\nendif\n\n# 
------------------------------------------------------------------------------------\n# Rules for buildStep: assemble\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\nelse\nendif\n\n# ------------------------------------------------------------------------------------\n# Rules for buildStep: assemblePreproc\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\nelse\nendif\n\n# ------------------------------------------------------------------------------------\n# Rules for buildStep: link\nifeq ($(TYPE_IMAGE), DEBUG_RUN)\ndist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}: ${OBJECTFILES} nbproject/Makefile-${CND_CONF}.mk \n\t@${MKDIR} dist/${CND_CONF}/${IMAGE_TYPE} \n\t${MP_CC} $(MP_EXTRA_LD_PRE) -o dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX} ${OBJECTFILES_QUOTED_IF_SPACED} -mcpu=$(MP_PROCESSOR_OPTION) -D__DEBUG -D__MPLAB_DEBUGGER_PK3=1 -omf=elf -mreserve=data@0x800:0x81B -mreserve=data@0x81C:0x81D -mreserve=data@0x81E:0x81F -mreserve=data@0x820:0x821 -mreserve=data@0x822:0x823 -mreserve=data@0x824:0x827 -mreserve=data@0x82A:0x84F -Wl,--local-stack,--defsym=__MPLAB_BUILD=1,--defsym=__MPLAB_DEBUG=1,--defsym=__DEBUG=1,--defsym=__MPLAB_DEBUGGER_PK3=1,$(MP_LINKER_FILE_OPTION),--heap=0,--stack=16,--check-sections,--data-init,--pack-data,--handles,--isr,--no-gc-sections,--fill-upper=0,--stackguard=16,--no-force-link,--smart-io,-Map=\"${DISTDIR}/${PROJECTNAME}.${IMAGE_TYPE}.map\",--report-mem$(MP_EXTRA_LD_POST) \n\t\nelse\ndist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${OUTPUT_SUFFIX}: ${OBJECTFILES} nbproject/Makefile-${CND_CONF}.mk \n\t@${MKDIR} dist/${CND_CONF}/${IMAGE_TYPE} \n\t${MP_CC} $(MP_EXTRA_LD_PRE) -o dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${DEBUGGABLE_SUFFIX} ${OBJECTFILES_QUOTED_IF_SPACED} -mcpu=$(MP_PROCESSOR_OPTION) -omf=elf -Wl,--local-stack,--defsym=__MPLAB_BUILD=1,$(MP_LINKER_FILE_OPTION),--heap=0,--stack=16,--check-sections,--data-init,--pack-data,--handles,--isr,--no-gc-sections,--fill-upper=0,--stackguard=16,--no-force-link,--smart-io,-Map=\"${DISTDIR}/${PROJECTNAME}.${IMAGE_TYPE}.map\",--report-mem$(MP_EXTRA_LD_POST) \n\t${MP_CC_DIR}\\\\xc16-bin2hex dist/${CND_CONF}/${IMAGE_TYPE}/life_jacket.X.${IMAGE_TYPE}.${DEBUGGABLE_SUFFIX} -a -omf=elf \n\t\nendif\n\n.pre:\n\t@echo \"--------------------------------------\"\n\t@echo \"User defined pre-build step: [python -E terminal_doc_gen.py]\"\n\t@python -E terminal_doc_gen.py\n\t@echo \"--------------------------------------\"\n\n# Subprojects\n.build-subprojects:\n\n\n# Subprojects\n.clean-subprojects:\n\n# Clean Targets\n.clean-conf: ${CLEAN_SUBPROJECTS}\n\t${RM} -r build/default\n\t${RM} -r dist/default\n\n# Enable dependency checking\n.dep.inc: .depcheck-impl\n\nDEPFILES=$(shell mplabwildcard ${POSSIBLE_DEPFILES})\nifneq (${DEPFILES},)\ninclude ${DEPFILES}\nendif\n" }, { "alpha_fraction": 0.8009153604507446, "alphanum_fraction": 0.8009153604507446, "avg_line_length": 32.61538314819336, "blob_id": "3ff47cc076b65e28cadcb8cfe27a0b3d57f8c3f8", "content_id": "cb34c354c3ef1f645183b5c1ec01e9987f54fb4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 437, "license_type": "no_license", "max_line_length": 78, "num_lines": 13, "path": "/life_jacket.X/nbproject/Makefile-variables.mk", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#\n# Generated - do not edit!\n#\n# NOCDDL\n#\nCND_BASEDIR=`pwd`\n# default 
configuration\nCND_ARTIFACT_DIR_default=dist/default/production\nCND_ARTIFACT_NAME_default=life_jacket.X.production.hex\nCND_ARTIFACT_PATH_default=dist/default/production/life_jacket.X.production.hex\nCND_PACKAGE_DIR_default=${CND_DISTDIR}/default/package\nCND_PACKAGE_NAME_default=lifejacket.x.tar\nCND_PACKAGE_PATH_default=${CND_DISTDIR}/default/package/lifejacket.x.tar\n" }, { "alpha_fraction": 0.4671947956085205, "alphanum_fraction": 0.5365330576896667, "avg_line_length": 27.838708877563477, "blob_id": "6a19cdc385087cca0d0f3ffd68ac0fe4c44caf83", "content_id": "07c576586cba145ed6f9d53518b07f648b6d5c55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10730, "license_type": "no_license", "max_line_length": 80, "num_lines": 372, "path": "/life_jacket.X/src/lora/rfm95w_io.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"lora/rfm95w_io.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"hal/spi_hal.h\"\n#include \"hal/uart.h\"\n#include \"uart/debug_log.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n/*\n Allowed frequencies:\n 868.1 - SF7BW125 to SF12BW125\n 868.3 - SF7BW125 to SF12BW125 and SF7BW250\n 868.5 - SF7BW125 to SF12BW125\n 867.1 - SF7BW125 to SF12BW125\n 867.3 - SF7BW125 to SF12BW125\n 867.5 - SF7BW125 to SF12BW125\n 867.7 - SF7BW125 to SF12BW125\n 867.9 - SF7BW125 to SF12BW125\n 868.8 - FSK\n\n Max output power: 14 dBm\n*/\nstatic const uint8_t FREQ_WORD_868_1[] = {0xD9, 0x06, 0x66};\nstatic const uint8_t FREQ_WORD_868_3[] = {0xD9, 0x13, 0x33};\nstatic const uint8_t FREQ_WORD_868_5[] = {0xD9, 0x20, 0x00};\nstatic const uint8_t FREQ_WORD_867_1[] = {0xD8, 0xC6, 0x66};\nstatic const uint8_t FREQ_WORD_867_3[] = {0xD8, 0xD3, 0x33};\nstatic const uint8_t FREQ_WORD_867_5[] = {0xD8, 0xE0, 0x00};\nstatic const uint8_t FREQ_WORD_867_7[] = {0xD8, 0xEC, 0xCC};\nstatic const uint8_t FREQ_WORD_867_9[] = {0xD8, 0xF9, 0x99};\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid rfm95w_io_write(rfm95w_address_t reg, uint8_t value)\n{\n uint16_t value_to_write = 0x8000;\n\n spi_hal_setup_for_device(SPI_DEVICE_RFM95W);\n\n value_to_write |= (((uint16_t)reg) << 8);\n value_to_write |= (uint16_t)value;\n\n 
(void)spi_hal_tranceive16(value_to_write);\n}\n\nuint8_t rfm95w_io_read(rfm95w_address_t reg)\n{\n uint16_t value_to_write = 0x0000;\n uint16_t read_value;\n\n spi_hal_setup_for_device(SPI_DEVICE_RFM95W);\n\n value_to_write |= (((uint16_t)reg) << 8);\n\n read_value = spi_hal_tranceive16(value_to_write);\n\n return ((uint8_t)read_value);\n}\n\nvoid rfm95w_io_set_dio_function(uint8_t dio_number, uint8_t dio_function)\n{\n uint8_t reg_addr = RFM95W_REG_DIO_MAPPING_1;\n uint8_t bit_shift = 0;\n uint8_t reg_value;\n\n switch (dio_number)\n {\n case 0:\n reg_addr = RFM95W_REG_DIO_MAPPING_1;\n bit_shift = 6;\n break;\n\n case 1:\n reg_addr = RFM95W_REG_DIO_MAPPING_1;\n bit_shift = 4;\n break;\n\n case 2:\n reg_addr = RFM95W_REG_DIO_MAPPING_1;\n bit_shift = 2;\n break;\n\n case 3:\n reg_addr = RFM95W_REG_DIO_MAPPING_1;\n bit_shift = 0;\n break;\n\n case 4:\n reg_addr = RFM95W_REG_DIO_MAPPING_2;\n bit_shift = 6;\n break;\n\n case 5:\n reg_addr = RFM95W_REG_DIO_MAPPING_2;\n bit_shift = 4;\n break;\n }\n\n reg_value = rfm95w_io_read(reg_addr);\n reg_value &= ~(0x03 << bit_shift);\n reg_value |= (dio_function & 0x03) << bit_shift;\n\n rfm95w_io_write(reg_addr, reg_value);\n}\n\nvoid rfm95w_io_set_operating_mode(rfm95w_operating_mode_t mode)\n{\n uint8_t reg_value;\n\n reg_value = rfm95w_io_read(RFM95W_REG_OP_MODE);\n reg_value &= 0xF8;\n reg_value |= ((uint8_t)mode) & 0x07;\n rfm95w_io_write(RFM95W_REG_OP_MODE, reg_value);\n}\n\nvoid rfm95w_io_set_single_rx_timeout(uint16_t symbols)\n{\n uint8_t modem_config_2;\n\n if (symbols > 0x03FF)\n {\n symbols = 0x03FF;\n }\n else if (symbols < 4)\n {\n symbols = 4;\n }\n\n modem_config_2 = rfm95w_io_read(RFM95W_REG_MODEM_CONFIG2);\n modem_config_2 &= 0xFC;\n modem_config_2 |= ((uint8_t)(symbols >> 8)) & 0x03;\n rfm95w_io_write(RFM95W_REG_MODEM_CONFIG2, modem_config_2);\n\n rfm95w_io_write(RFM95W_REG_SYMB_TIMEOUT_LSB, (uint8_t)symbols);\n}\n\nvoid rfm95w_io_clear_all_irqs(void)\n{\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS, 0xFF); \n}\n\nvoid rfm95w_io_set_bandwidth(rfm95w_modem_cfg_bw_t bandwidth)\n{\n uint8_t modem_config_1;\n\n modem_config_1 = rfm95w_io_read(RFM95W_REG_MODEM_CONFIG1);\n\n modem_config_1 &= 0x0F;\n modem_config_1 |= (bandwidth << 4);\n\n rfm95w_io_write(RFM95W_REG_MODEM_CONFIG1, modem_config_1);\n\n switch (bandwidth)\n {\n case RFM95W_BW_7K8:\n debug_log_append_line(\"\\tLORA bandwidth set to 7.8 kHz\");\n break;\n\n case RFM95W_BW_10K4:\n debug_log_append_line(\"\\tLORA bandwidth set to 10.4 kHz\");\n break;\n\n case RFM95W_BW_15K6:\n debug_log_append_line(\"\\tLORA bandwidth set to 15.6 kHz\");\n break;\n\n case RFM95W_BW_20K8:\n debug_log_append_line(\"\\tLORA bandwidth set to 20.8 kHz\");\n break;\n\n case RFM95W_BW_31K25:\n debug_log_append_line(\"\\tLORA bandwidth set to 31.25 kHz\");\n break;\n\n case RFM95W_BW_41K7:\n debug_log_append_line(\"\\tLORA bandwidth set to 41.7 kHz\");\n break;\n\n case RFM95W_BW_62K5:\n debug_log_append_line(\"\\tLORA bandwidth set to 62.5 kHz\");\n break;\n\n case RFM95W_BW_125K:\n debug_log_append_line(\"\\tLORA bandwidth set to 125 kHz\");\n break;\n\n case RFM95W_BW_250K:\n debug_log_append_line(\"\\tLORA bandwidth set to 250 kHz\");\n break;\n\n case RFM95W_BW_500K:\n debug_log_append_line(\"\\tLORA bandwidth set to 500 kHz\");\n break;\n\n default:\n debug_log_append_line(\"\\tLORA bandwidth set to invalid value\");\n break;\n }\n}\n\nvoid rfm95w_io_set_coding_rate(rfm95w_coding_rate_t coding_rate)\n{\n uint8_t modem_config_1;\n\n modem_config_1 = rfm95w_io_read(RFM95W_REG_MODEM_CONFIG1);\n\n 
modem_config_1 &= 0xF1;\n modem_config_1 |= (coding_rate << 1);\n\n rfm95w_io_write(RFM95W_REG_MODEM_CONFIG1, modem_config_1);\n\n switch (coding_rate)\n {\n case RFM95W_CODING_RATE_4_5:\n debug_log_append_line(\"\\tLORA coding rate set to 4/5\");\n break;\n\n case RFM95W_CODING_RATE_4_6:\n debug_log_append_line(\"\\tLORA coding rate set to 4/6\");\n break;\n\n case RFM95W_CODING_RATE_4_7:\n debug_log_append_line(\"\\tLORA coding rate set to 4/7\");\n break;\n\n case RFM95W_CODING_RATE_4_8:\n debug_log_append_line(\"\\tLORA coding rate set to 4/8\");\n break;\n\n default:\n debug_log_append_line(\"\\tLORA coding rate set to invalid value\");\n break;\n }\n}\n\nvoid rfm95w_io_set_speading_factor(rfm95w_spreading_factor_t spreading_factor)\n{\n uint8_t modem_config_2;\n\n modem_config_2 = rfm95w_io_read(RFM95W_REG_MODEM_CONFIG2);\n\n modem_config_2 &= 0x0F;\n modem_config_2 |= (spreading_factor << 4);\n\n rfm95w_io_write(RFM95W_REG_MODEM_CONFIG2, modem_config_2);\n\n switch (spreading_factor)\n {\n case RFM95W_SPREADING_FACTOR_64_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 64 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_128_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 128 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_256_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 256 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_512_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 512 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_1024_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 1024 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_2048_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 2048 chips\");\n break;\n\n case RFM95W_SPREADING_FACTOR_4096_CHIPS:\n debug_log_append_line(\"\\tLORA spreading factor set to 4096 chips\");\n break;\n\n default:\n debug_log_append_line(\"\\tLORA spreading factor set to invalid value\");\n break;\n }\n}\n\nvoid rfm95w_io_set_frequency(rfm95w_channel_frequency_t frequency)\n{\n const uint8_t * freq_word;\n\n switch (frequency)\n {\n case RFM95W_CHANNEL_FREQUENCY_868_1:\n freq_word = FREQ_WORD_868_1;\n debug_log_append_line(\"\\tLORA frequency set to 868.1 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_868_3:\n freq_word = FREQ_WORD_868_3;\n debug_log_append_line(\"\\tLORA frequency set to 868.3 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_868_5:\n freq_word = FREQ_WORD_868_5;\n debug_log_append_line(\"\\tLORA frequency set to 868.5 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_867_1:\n freq_word = FREQ_WORD_867_1;\n debug_log_append_line(\"\\tLORA frequency set to 867.1 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_867_3:\n freq_word = FREQ_WORD_867_3;\n debug_log_append_line(\"\\tLORA frequency set to 867.3 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_867_5:\n freq_word = FREQ_WORD_867_5;\n debug_log_append_line(\"\\tLORA frequency set to 867.5 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_867_7:\n freq_word = FREQ_WORD_867_7;\n debug_log_append_line(\"\\tLORA frequency set to 867.7 MHz\");\n break;\n\n case RFM95W_CHANNEL_FREQUENCY_867_9:\n freq_word = FREQ_WORD_867_9;\n debug_log_append_line(\"\\tLORA frequency set to 867.9 MHz\");\n break;\n\n default:\n freq_word = FREQ_WORD_868_1;\n debug_log_append_line(\"\\tLORA frequency set to 868.1 MHz\");\n break;\n }\n\n rfm95w_io_write(RFM95W_REG_FRF_MSB, freq_word[0]);\n rfm95w_io_write(RFM95W_REG_FRF_MID, freq_word[1]);\n rfm95w_io_write(RFM95W_REG_FRF_LSB, 
freq_word[2]);\n}\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\n\n" }, { "alpha_fraction": 0.4498969614505768, "alphanum_fraction": 0.5071155428886414, "avg_line_length": 24.34577178955078, "blob_id": "e9bdd76e6f47a273e0f0792d8575d2693392a9da", "content_id": "ab1d51d0e3615a736a65420572220056c958c1af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10211, "license_type": "no_license", "max_line_length": 80, "num_lines": 402, "path": "/life_jacket.X/src/hal/spi_hal.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"hal/spi_hal.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"hal/gpio.h\"\n#include \"hal/clock.h\"\n\n#include <stdio.h>\n#include \"hal/uart.h\"\n#include \"uart/debug_log.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// LIS2HH12 max clock freq is 10Mhz, run at 1 MHz\n#define LIS2HH12_TARGET_FREQ (1000000ull)\nstatic uint16_t LIS2HH12_BRG = \n\tCLOCK_HAL_PCBCLOCK_FREQ / (2 * LIS2HH12_TARGET_FREQ) - 1;\n\n// RFM95W max clock freq is 10Mhz, run at 1 MHz\n#define RFM95W_TARGET_FREQ (1000000ull)\nstatic uint16_t RFM95W_BRG = \n CLOCK_HAL_PCBCLOCK_FREQ / (2 * RFM95W_TARGET_FREQ) - 1;\n\n// PCM1770 max clock freq is 16Mhz, run at 1 MHz\n#define PCM1770_TARGET_FREQ (1000000ull)\nstatic uint16_t PCM1770_BRG = \n CLOCK_HAL_PCBCLOCK_FREQ / (2 * PCM1770_TARGET_FREQ) - 1;\n\n// MX25R6435F max clock freq is 8Mhz of reads, run at 8 MHz\n#define MX25R6435F_TARGET_FREQ (8000000ull)\nstatic uint16_t MX25R6435F_BRG = \n CLOCK_HAL_PCBCLOCK_FREQ / (2 * MX25R6435F_TARGET_FREQ) - 1;\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic spi_hal_device_t current_device = SPI_DEVICE_NULL;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void spi_hal_setup_for_lis2hh12(void);\nstatic void spi_hal_setup_for_rfm95w(void);\nstatic void spi_hal_setup_for_pcm1770(void);\nstatic void spi_hal_setup_for_mx25r6435f(void);\n\nstatic void spi_hal_cs_on(void);\nstatic void spi_hal_cs_off(void);\nstatic void spi_hal_all_cs_off(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid spi_hal_setup_for_device(spi_hal_device_t device)\n{\n if (current_device != device)\n 
{\n current_device = device;\n\n spi_hal_all_cs_off();\n\n switch (device)\n {\n case SPI_DEVICE_RFM95W:\n spi_hal_setup_for_rfm95w();\n break;\n\n case SPI_DEVICE_LIS2HH12:\n spi_hal_setup_for_lis2hh12();\n break;\n\n case SPI_DEVICE_PCM1770:\n spi_hal_setup_for_pcm1770();\n break;\n\n case SPI_DEVICE_EXT_FLASH:\n spi_hal_setup_for_mx25r6435f();\n break;\n\n default:\n break;\n\n }\n }\n}\n\nuint16_t spi_hal_tranceive16(uint16_t v)\n{\n\tuint16_t read_value = 0;\n\n\tspi_hal_cs_on();\n\n SPI1BUFH = 0;\n\tSPI1BUFL = v;\n\n\twhile (SPI1STATLbits.SPIRBE) {;}\n\n read_value = SPI1BUFH;\n\tread_value = SPI1BUFL;\n\n\tspi_hal_cs_off();\n\n\treturn read_value;\n}\n\nuint8_t spi_hal_tranceive8(uint8_t v)\n{\n uint8_t read_value = 0;\n\n SPI1BUFH = 0;\n SPI1BUFL = v;\n\n while (SPI1STATLbits.SPIRBE) {;}\n\n read_value = SPI1BUFH;\n read_value = SPI1BUFL;\n\n return read_value;\n}\n\nvoid spi_hal_read16_block(uint16_t * read_data,\n uint16_t length)\n{\n {\n SPI1IMSKL = 0; // No interrupts\n SPI1IMSKH = 0; // No interrupts\n \n SPI1CON1Lbits.SPIEN = 0; // Turn of and reset the module\n\n SPI1CON1L = 0;\n SPI1CON1H = 0;\n\n SPI1CON1Lbits.MODE32 = 1; // 32 bit mode\n SPI1CON1Lbits.MODE16 = 0; // 32 bit mode\n SPI1CON1Lbits.CKP = 0; // Clock idle low\n SPI1CON1Lbits.CKE = 1; // Transmit at active to idle clk transition\n SPI1CON1Lbits.MSTEN = 1; // Master mode\n SPI1CON1Lbits.ENHBUF = 1; // Use enhanced buffer mode\n\n SPI1CON2L = 0; // 16 bit mode\n SPI1STATL = 0; // Clear any errors\n\n SPI1BRGL = MX25R6435F_BRG;\n\n SPI1CON1Lbits.SPIEN = 1;\n }\n\n uint16_t read_count;\n uint16_t write_count;\n\n length = length >> 2;\n read_count = length;\n write_count = length;\n\n SPI1BUFL = 0;\n SPI1BUFH = 0;\n\n while (1 != read_count)\n {\n if (SPI1STATLbits.SPITBE)\n {\n SPI1BUFL = 0;\n SPI1BUFH = 0;\n --write_count;\n }\n\n if (!SPI1STATLbits.SPIRBE)\n {\n *(read_data + 1) = SPI1BUFL;\n *read_data = SPI1BUFH;\n read_data += 2;\n --read_count;\n }\n }\n\n while (SPI1STATLbits.SPIRBE) {;}\n \n *(read_data + 1) = SPI1BUFL;\n *read_data = SPI1BUFH;\n read_data += 2;\n\n {\n SPI1IMSKL = 0; // No interrupts\n SPI1IMSKH = 0; // No interrupts\n \n SPI1CON1Lbits.SPIEN = 0; // Turn of and reset the module\n\n SPI1CON1L = 0;\n SPI1CON1H = 0;\n\n SPI1CON1Lbits.MODE32 = 0; // 8 bit mode\n SPI1CON1Lbits.MODE16 = 0; // 8 bit mode\n SPI1CON1Lbits.CKP = 0; // Clock idle low\n SPI1CON1Lbits.CKE = 1; // Transmit at active to idle clk transition\n SPI1CON1Lbits.MSTEN = 1; // Master mode\n SPI1CON1Lbits.ENHBUF = 1; // Use enhanced buffer mode\n\n SPI1CON2L = 0; // 16 bit mode\n SPI1STATL = 0; // Clear any errors\n\n SPI1BRGL = MX25R6435F_BRG;\n \n SPI1CON1Lbits.SPIEN = 1;\n }\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void spi_hal_setup_for_lis2hh12(void)\n{\n\tSPI1IMSKL = 0; \t\t\t\t// No interrupts\n\tSPI1IMSKH = 0;\t\t\t\t// No interrupts\n\t\n\tSPI1CON1Lbits.SPIEN = 0;\t// Turn of and reset the module\n\n\tSPI1CON1L = 0;\n\tSPI1CON1H = 0;\n\n\tSPI1CON1Lbits.MODE32 = 0;\t// 16 bit mode\n\tSPI1CON1Lbits.MODE16 = 1;\t// 16 bit mode\n\tSPI1CON1Lbits.CKP = 1; \t\t// Clock idle high\n\tSPI1CON1Lbits.CKE = 0; \t\t// Transmit at idle to active clk transition\n\tSPI1CON1Lbits.MSTEN = 1;\t// Master mode\n\tSPI1CON1Lbits.ENHBUF = 1;\t// Use enhanced buffer mode\n\n\tSPI1CON2L = 0;\t\t\t\t// 16 bit mode\n\tSPI1STATL = 0; \t\t\t\t// Clear any errors\n\n\tSPI1BRGL = 
LIS2HH12_BRG;\n\n\tACC_CS_OFF;\n\tSPI1CON1Lbits.SPIEN = 1;\n}\n\nstatic void spi_hal_setup_for_rfm95w(void)\n{\n SPI1IMSKL = 0; // No interrupts\n SPI1IMSKH = 0; // No interrupts\n \n SPI1CON1Lbits.SPIEN = 0; // Turn of and reset the module\n\n SPI1CON1L = 0;\n SPI1CON1H = 0;\n\n SPI1CON1Lbits.MODE32 = 0; // 16 bit mode\n SPI1CON1Lbits.MODE16 = 1; // 16 bit mode\n SPI1CON1Lbits.CKP = 0; // Clock idle low\n SPI1CON1Lbits.CKE = 1; // Transmit at active to idle clk transition\n SPI1CON1Lbits.MSTEN = 1; // Master mode\n SPI1CON1Lbits.ENHBUF = 1; // Use enhanced buffer mode\n\n SPI1CON2L = 0; // 16 bit mode\n SPI1STATL = 0; // Clear any errors\n\n SPI1BRGL = RFM95W_BRG;\n\n LORA_CS_OFF;\n SPI1CON1Lbits.SPIEN = 1;\n}\n\nstatic void spi_hal_setup_for_pcm1770(void)\n{\n SPI1IMSKL = 0; // No interrupts\n SPI1IMSKH = 0; // No interrupts\n \n SPI1CON1Lbits.SPIEN = 0; // Turn of and reset the module\n\n SPI1CON1L = 0;\n SPI1CON1H = 0;\n\n SPI1CON1Lbits.MODE32 = 0; // 16 bit mode\n SPI1CON1Lbits.MODE16 = 1; // 16 bit mode\n SPI1CON1Lbits.CKP = 1; // Clock idle high\n SPI1CON1Lbits.CKE = 0; // Transmit at idle to active clk transition\n SPI1CON1Lbits.MSTEN = 1; // Master mode\n SPI1CON1Lbits.ENHBUF = 1; // Use enhanced buffer mode\n\n SPI1CON2L = 0; // 16 bit mode\n SPI1STATL = 0; // Clear any errors\n\n SPI1BRGL = PCM1770_BRG;\n\n PCM1770_CS_OFF;\n SPI1CON1Lbits.SPIEN = 1;\n}\n\n/*\n CS ‾‾‾‾‾__________\n\n\n SCLK ___‾‾__‾‾__‾‾__\n SI DD DD (MSB first)\n SO ______<DD><DD><D \n\n Data sampled on rising edge\n Clock idle low\n*/\nstatic void spi_hal_setup_for_mx25r6435f(void)\n{\n SPI1IMSKL = 0; // No interrupts\n SPI1IMSKH = 0; // No interrupts\n \n SPI1CON1Lbits.SPIEN = 0; // Turn of and reset the module\n\n SPI1CON1L = 0;\n SPI1CON1H = 0;\n\n SPI1CON1Lbits.MODE32 = 0; // 8 bit mode\n SPI1CON1Lbits.MODE16 = 0; // 8 bit mode\n SPI1CON1Lbits.CKP = 0; // Clock idle low\n SPI1CON1Lbits.CKE = 1; // Transmit at active to idle clk transition\n SPI1CON1Lbits.MSTEN = 1; // Master mode\n SPI1CON1Lbits.ENHBUF = 0; // Don't use enhanced buffer mode\n\n SPI1CON2L = 0; // 16 bit mode\n SPI1STATL = 0; // Clear any errors\n\n SPI1BRGL = MX25R6435F_BRG;\n\n FLASH_CS_OFF;\n SPI1CON1Lbits.SPIEN = 1;\n}\n\nstatic void spi_hal_cs_on(void)\n{\n\tswitch (current_device)\n\t{\n\tcase SPI_DEVICE_NULL:\n\t\tbreak;\n\n case SPI_DEVICE_RFM95W:\n \tLORA_CS_ON;\n \tbreak;\n\n case SPI_DEVICE_LIS2HH12:\n \tACC_CS_ON;\n \tbreak;\n\n case SPI_DEVICE_PCM1770:\n \tPCM1770_CS_ON;\n \tbreak;\n\n case SPI_DEVICE_EXT_FLASH:\n \tFLASH_CS_ON;\n \tbreak;\n }\n}\n\nstatic void spi_hal_cs_off(void)\n{\n\tswitch (current_device)\n\t{\n\tcase SPI_DEVICE_NULL:\n\t\tbreak;\n\n case SPI_DEVICE_RFM95W:\n \tLORA_CS_OFF;\n \tbreak;\n\n case SPI_DEVICE_LIS2HH12:\n \tACC_CS_OFF;\n \tbreak;\n\n case SPI_DEVICE_PCM1770:\n \tPCM1770_CS_OFF;\n \tbreak;\n\n case SPI_DEVICE_EXT_FLASH:\n \tFLASH_CS_OFF;\n \tbreak;\n }\n}\n\nstatic void spi_hal_all_cs_off(void)\n{\n\tLORA_CS_OFF;\n\tACC_CS_OFF;\n\tPCM1770_CS_OFF;\n\tFLASH_CS_OFF;\n}\n" }, { "alpha_fraction": 0.4268699288368225, "alphanum_fraction": 0.4325869381427765, "avg_line_length": 26.605262756347656, "blob_id": "7872556e1aecc2b63397050c028268409d6eca7c", "content_id": "34763163d3f67e63fe2635a639c88c92656f66ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2099, "license_type": "no_license", "max_line_length": 80, "num_lines": 76, "path": "/life_jacket.X/inc/audio/audio.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef 
AUDIO_H\n#define AUDIO_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n#define AUDIO_BUFFER_LENGTH (512)\n#define AUDIO_BUFFER_SIZE (AUDIO_BUFFER_LENGTH * 2)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Initializes the audio module.\n*/\nvoid audio_init(void);\n\n/**\n @brief Starts an audio playback session.\n @param track_number - Number of the track to play.\n*/\nvoid audio_start_playback_session(uint16_t track_number);\n\n/**\n @brief Fills the audio back buffer if there is more data, or ends the\n playback session if the track has finished.\n*/\nvoid audio_handle_buffer_update(void);\n\n/**\n @brief Checks if an audio playback is active.\n @return True if an audio track is being played.\n */\nbool audio_is_playback_in_progress(void);\n\n/**\n @brief Gets the audio front buffer.\n @return Front buffer.\n*/\nconst int16_t * audio_get_front_buffer(void);\n\n/**\n @brief Swaps the front and back buffers.\n*/\nvoid audio_switch_buffer(void);\n\n/**\n @brief Gets the min and max addresses used by the audio buffer.\n*/\nvoid audio_get_sample_pointer_limits(int16_t ** min, int16_t ** max);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* AUDIO_H */\n\n" }, { "alpha_fraction": 0.48414841294288635, "alphanum_fraction": 0.5052419900894165, "avg_line_length": 22.185588836669922, "blob_id": "27eb856ed919e154a28189ea7bbc391fd082e85d", "content_id": "d003ad114968f1211e1211c487efedaddcb8adf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 31891, "license_type": "no_license", "max_line_length": 87, "num_lines": 1374, "path": "/life_jacket.X/src/uart/terminal.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "WINDOWS-1252", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n#include \"uart\\terminal.h\"\n\n#include <stdbool.h>\n#include <stdint.h>\n#include <string.h>\n#include <stdio.h>\n#include <ctype.h>\n#include <stdlib.h>\n\n#include \"hal/uart.h\"\n#include \"uart/debug_log.h\"\n#include \"uart/terminal_help.h\"\n#include \"hal/gpio.h\"\n#include \"hal/flash.h\"\n#include \"gps/jf2_uart.h\"\n#include \"gps/jf2_io.h\"\n#include \"gps/nmea.h\"\n#include \"acc/accelerometer.h\"\n#include \"lora/rfm95w.h\"\n#include \"lora/rfm95w_io.h\"\n#include \"lora/p2pc_protocol.h\"\n#include \"audio/ext_flash.h\"\n#include \"audio/audio.h\"\n\n// =============================================================================\n// Private type 
definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n#define HEX_BYTE_STR_LEN (2 + 1)\n#define HEX_WORD_STR_LEN (4 + 1)\n#define HEX_DWORD_STR_LEN (8 + 1)\n\nstatic const char NEWLINE[] = \"\\r\\n\";\n\nstatic const char CMD_TYPE_HELP[] = \"help\";\nstatic const char CMD_TYPE_SET[] = \"set \";\nstatic const char CMD_TYPE_GET[] = \"get \";\n\nstatic const char SYNTAX_ERROR[] = \"[Syntax error]\";\nstatic const char ARGUMENT_ERROR[] = \"[Invalid argument]\";\n\n#define CMD_BUFFER_SIZE 257\n#define EXT_FLASH_BUFFER_LEN (128)\n\n//\n// Commands\n//\n\n/*§\n Say hi!\n */\nstatic const char CMD_HELLO[] = \"hello\";\n\n/*§\n Forces a software reboot.\n */\nstatic const char CMD_SYSTEM_RESET[] = \"system reset\";\n\n/*§\n Initiates the flash write buffer with the contents of theflash data memory.\n */\nstatic const char CMD_INIT_WRITE_BUFFER[] = \"init flash bufffer\";\n\n/*§\n Writes one byte to the flash buffer.\n Paramters: <index in hex format> <one byte value in hex format>\n */\nstatic const char CMD_BUFFERED_WRITE[] = \"buffered write\";\n\n/*§\n Write the contents of the flash buffer to the flash memory.\n */\nstatic const char CMD_FLUSH_BUFFER[] = \"flush flash buffer\";\n\n/*§\n Starts a LORA CW transmission.\n */\nstatic const char CMD_LORA_CW[] = \"lora cw\";\n\n/*§\n Starts a LoRa GPS position broadcast.\n */\nstatic const char CMD_LORA_GPS_BROADCAST[] = \"lora gps broadcast\";\n\n/*§\n Starts continuous rx LoRa mode.\n */\nstatic const char CMD_LORA_CONTIUOUS_RX[] = \"lora cont rx\";\n\n/*§\n Sends a on/off pulse to the GPS module.\n */\nstatic const char CMD_GPS_ON_OFF_PULSE[] = \"gps on off pulse\";\n\n/*§\n Erases the whole external flash memory.\n */\nstatic const char CMD_EXT_FLASH_CHIP_ERASE[] = \"ext flash chip erase\";\n\n/*§\n Writes test data to the first page (first 256 bytes).\n*/\nstatic const char CMD_EXT_FLASH_WRITE_TEST[] = \"ext flash write test\";\n\n/*§\n Sets two bytes in the page buffer. 
Word indexed.\n Parameters: <index in range [0, 127]> <value to set as 4 hex digits>\n*/\nstatic const char CMD_EXT_FLASH_SET_PAGE_BUFFER[] = \"ef spb\";\n\n/*§\n Writes the page buffer to a page in the external flash memory.\n Parameters: <page address as 6 hex digits>\n*/\nstatic const char CMD_EXT_FLASH_WRITE_PAGE[] = \"ef wp\";\n\n/*§\n Runs a audio session test.\n*/\nstatic const char CMD_TEST_AUDIO_SESSION[] = \"test audio session\";\n\n/*§\n Writes test audio data to the external flash memory.\n*/\nstatic const char CMD_WRITE_AUDIO_TEST_DATA[] = \"write audio test data\";\n\n/*§\n Resets the debug UART module.\n*/\nstatic const char CMD_RESET_UART[] = \"restart uart\";\n\n/*§\n Gets one byte from the flash data memory.\n Parameter: <index in hex format>\n Returns: <hex value of byte at specified index>\n */\nstatic const char GET_FLASH[] = \"get flash\";\n\n/*§\n Gets one byte from the external flash memory.\n Parameter: <address in hex>\n Returns: <read byte in hex format>\n */\nstatic const char GET_EXT_FLASH[] = \"get ext flash\";\n\n/*§\n Gets 256 of bytes from the external flash memory.\n Parameters: <start address in hex>\n*/\nstatic const char GET_PAGE_EXT_FLASH[] = \"get page ext flash\";\n\n/*§\n Gets the status of the GPS.\n */\nstatic const char GET_GPS_STATUS[] = \"get gps status\";\n\n/*§\n Gets the x, y, z values from the accelerometer.\n */\nstatic const char GET_ORIENTATION[] = \"get orientation\";\n\n/*§\n Gets the LORA address.\n*/\nstatic const char GET_LORA_ADDRESS[] = \"get lora address\";\n\n/*§\n Sets one byte in the flash data memory.\n Paramter: <index in hex format> <one byte value in hex format>\n */\nstatic const char SET_FLASH[] = \"set flash\";\n\n/*§\n Enabled/disables received GPS messages from being echoed onto the debug UART.\n Paramter: <'on' or 'off'>\n */\nstatic const char SET_GPS_ECHO[] = \"set gps echo\";\n\n/*§\n Sets the LoRa channel bandwidth.\n Parameter: <bandwidth setting in range [0, 9]>\n\n 0 = 7.8kHz\n 1 = 10.4kHz\n 2 = 15.6 kHz\n 3 = 20.8 kHz\n 4 = 31.25 kHz\n 5 = 41.7 kHz\n 6 = 62.5 kHz\n 7 = 125 kHz\n 8 = 250 kHz\n 9 = 500 kHz\n*/\nstatic const char SET_LORA_BANDWIDTH[] = \"set lora bw\";\n\n/*§\n Sets the LoRa coding rate.\n Parameter: <coding rate setting in range[1, 4]>\n\n 1 = coding rate 4/5\n 2 = coding rate 4/6\n 3 = coding rate 4/7\n 4 = coding rate 4/8\n */\nstatic const char SET_LORA_CODING_RATE[] = \"set lora cr\";\n\n/*§\n Sets the LoRa spreading factor.\n Parameter: <spreading factor in range [6, 12]>\n\n A spreading factor of 'sf' gives 2^(sf) chips \n */\nstatic const char SET_LORA_SPREADING_FACTOR[] = \"set lora sf\";\n\n/*§\n Sets the LoRa frequency.\n Parameter: <frequency band in range [1, 8]>\n\n Bands:\n 1 = 868.1 MHz\n 2 = 868.3 MHz\n 3 = 868.5 MHz\n 4 = 867.1 MHz\n 5 = 867.3 MHz\n 6 = 867.5 MHz\n 7 = 867.7 MHz\n 8 = 867.9 MHz\n */\nstatic const char SET_LORA_FREQUENCY[] = \"set lora freq\";\n\n/*§\n Enables/disables sleep mode.\n Paramter: <'on' or 'off'>\n*/\nstatic const char SET_SLEEP_ALLOWED[] = \"set sleep allowed\";\n\n/*§\n Enables/disables the debug log.\n Paramter: <'on' or 'off'>\n*/\nstatic const char SET_DEBUG_LOG_ENABLE[] = \"set debug log enable\";\n\n/*§\n Sets the LORA protocol to p2ps.\n*/\nstatic const char SET_LORA_P2PS[] = \"set lora to p2ps\";\n\n/*§\n Sets the LORA protocol to p2pc.\n*/\nstatic const char SET_LORA_P2PC[] = \"set lora to p2pc\";\n\n/*§\n Sets the LORA P2P address.\n Parameter: <address as 8 digit hex number>\n*/\nstatic const char SET_LORA_ADDRESS[] = \"set lora 
address\";\n\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic char cmd_buffer[CMD_BUFFER_SIZE] = {0};\nstatic bool arg_error = false;\nstatic bool is_sleep_allowed = true;\n\nstatic uint16_t ext_flash_page_buff[EXT_FLASH_BUFFER_LEN];\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n/**\n * @brief Copies the uart rx buffer to the cmd_buffer.\n */\nstatic void copy_to_cmd_buffer(void);\n\n/**\n * @brief Parses and executes the command in the command buffer.\n */\nstatic void execute_command(void);\n\n//\n// Command help functions\n//\nstatic void cmd_hello(void);\nstatic void cmd_system_reset(void);\nstatic void cmd_init_flash_buffer(void);\nstatic void cmd_buffered_write(void);\nstatic void cmd_flush_buffer(void);\nstatic void cmd_lora_cw(void);\nstatic void cmd_lora_gps_broadcast(void);\nstatic void cmd_lora_contiuous_rx(void);\nstatic void cmd_gps_on_off_pulse(void);\nstatic void cmd_ext_flash_chip_erase(void);\nstatic void cmd_ext_flash_write_test(void);\nstatic void cmd_ext_flash_set_page_buffer(void);\nstatic void cmd_ext_flash_write_page(void);\nstatic void cmd_test_audio_session(void);\nstatic void cmd_write_audio_test_data(void);\nstatic void cmd_reset_uart(void);\n\nstatic void get_flash(void);\nstatic void get_ext_flash(void);\nstatic void get_page_ext_flash(void);\nstatic void get_gps_status(void);\nstatic void get_orientation(void);\nstatic void get_lora_address(void);\n\nstatic void set_flash(void);\nstatic void set_gps_echo(void);\nstatic void set_lora_bandwidth(void);\nstatic void set_lora_coding_rate(void);\nstatic void set_lora_spreading_factor(void);\nstatic void set_lora_frequency_band(void);\nstatic void set_sleep_allowed(void);\nstatic void set_debug_log_enable(void);\nstatic void set_lora_to_p2ps(void);\nstatic void set_lora_to_p2pc(void);\nstatic void set_lora_address(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid terminal_handle_uart_event(void)\n{\n copy_to_cmd_buffer();\n execute_command();\n}\n\nbool termnial_allows_sleep(void)\n{\n return is_sleep_allowed;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void copy_to_cmd_buffer(void)\n{\n uint16_t nbr_of_bytes;\n uint16_t current_index = 0;\n\n nbr_of_bytes = uart_get_receive_buffer_size();\n\n for (current_index = 0; current_index != nbr_of_bytes; ++current_index)\n {\n cmd_buffer[current_index] = (char)uart_get(current_index);\n }\n\n cmd_buffer[current_index] = NULL;\n\n uart_clear_receive_buffer();\n}\n\nstatic void execute_command(void)\n{\n bool syntax_error = false;\n arg_error = false;\n\n if (NULL != strstr(cmd_buffer, CMD_TYPE_HELP))\n {\n terminal_help(cmd_buffer);\n }\n //\n // GET\n //\n else if (NULL != strstr(cmd_buffer, CMD_TYPE_GET))\n {\n if (NULL != strstr(cmd_buffer, GET_FLASH))\n {\n get_flash();\n }\n else if (NULL != strstr(cmd_buffer, GET_EXT_FLASH))\n {\n get_ext_flash();\n }\n else if (NULL != strstr(cmd_buffer, GET_PAGE_EXT_FLASH))\n {\n get_page_ext_flash();\n }\n 
else if (NULL != strstr(cmd_buffer, GET_GPS_STATUS))\n {\n get_gps_status();\n }\n else if (NULL != strstr(cmd_buffer, GET_ORIENTATION))\n {\n get_orientation();\n }\n else if (NULL != strstr(cmd_buffer, GET_LORA_ADDRESS))\n {\n get_lora_address();\n }\n else\n {\n syntax_error = true;\n }\n }\n //\n // SET\n //\n else if (NULL != strstr(cmd_buffer, CMD_TYPE_SET))\n {\n if (NULL != strstr(cmd_buffer, SET_FLASH))\n {\n set_flash();\n }\n else if (NULL != strstr(cmd_buffer, SET_GPS_ECHO))\n {\n set_gps_echo();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_BANDWIDTH))\n {\n set_lora_bandwidth();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_CODING_RATE))\n {\n set_lora_coding_rate();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_SPREADING_FACTOR))\n {\n set_lora_spreading_factor();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_FREQUENCY))\n {\n set_lora_frequency_band();\n }\n else if (NULL != strstr(cmd_buffer, SET_SLEEP_ALLOWED))\n {\n set_sleep_allowed();\n }\n else if (NULL != strstr(cmd_buffer, SET_DEBUG_LOG_ENABLE))\n {\n set_debug_log_enable();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_P2PS))\n {\n set_lora_to_p2ps();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_P2PC))\n {\n set_lora_to_p2pc();\n }\n else if (NULL != strstr(cmd_buffer, SET_LORA_ADDRESS))\n {\n set_lora_address();\n }\n else\n {\n syntax_error = true;\n }\n }\n //\n // CMD\n //\n else\n {\n if (NULL != strstr(cmd_buffer, CMD_HELLO))\n {\n cmd_hello();\n }\n else if (NULL != strstr(cmd_buffer, CMD_SYSTEM_RESET))\n {\n cmd_system_reset();\n }\n else if (NULL != strstr(cmd_buffer, CMD_INIT_WRITE_BUFFER))\n {\n cmd_init_flash_buffer();\n }\n else if (NULL != strstr(cmd_buffer, CMD_BUFFERED_WRITE))\n {\n cmd_buffered_write();\n }\n else if (NULL != strstr(cmd_buffer, CMD_FLUSH_BUFFER))\n {\n cmd_flush_buffer();\n }\n else if (NULL != strstr(cmd_buffer, CMD_LORA_CW))\n {\n cmd_lora_cw();\n }\n else if (NULL != strstr(cmd_buffer, CMD_LORA_GPS_BROADCAST))\n {\n cmd_lora_gps_broadcast();\n }\n else if (NULL != strstr(cmd_buffer, CMD_LORA_CONTIUOUS_RX))\n {\n cmd_lora_contiuous_rx();\n }\n else if (NULL != strstr(cmd_buffer, CMD_GPS_ON_OFF_PULSE))\n {\n cmd_gps_on_off_pulse();\n }\n else if (NULL != strstr(cmd_buffer, CMD_EXT_FLASH_CHIP_ERASE))\n {\n cmd_ext_flash_chip_erase();\n }\n else if (NULL != strstr(cmd_buffer, CMD_EXT_FLASH_WRITE_TEST))\n {\n cmd_ext_flash_write_test();\n }\n else if (NULL != strstr(cmd_buffer, CMD_EXT_FLASH_SET_PAGE_BUFFER))\n {\n cmd_ext_flash_set_page_buffer();\n }\n else if (NULL != strstr(cmd_buffer, CMD_EXT_FLASH_WRITE_PAGE))\n {\n cmd_ext_flash_write_page();\n }\n else if (NULL != strstr(cmd_buffer, CMD_TEST_AUDIO_SESSION))\n {\n cmd_test_audio_session();\n }\n else if (NULL != strstr(cmd_buffer, CMD_WRITE_AUDIO_TEST_DATA))\n {\n cmd_write_audio_test_data();\n }\n else if (NULL != strstr(cmd_buffer, CMD_RESET_UART))\n {\n cmd_reset_uart();\n }\n else\n {\n syntax_error = true;\n }\n }\n\n uart_disable_tx_interrupt();\n\n if (syntax_error)\n {\n uart_write_string(SYNTAX_ERROR);\n uart_write_string(NEWLINE);\n }\n else if (arg_error)\n {\n uart_write_string(ARGUMENT_ERROR);\n uart_write_string(NEWLINE);\n }\n else\n {\n uart_write_string(\"ok\");\n uart_write_string(NEWLINE);\n }\n\n uart_enable_tx_interrupt();\n}\n\nstatic void cmd_hello(void)\n{\n uart_write_string(\"Hello!\");\n uart_write_string(NEWLINE);\n}\n\nstatic void cmd_system_reset(void)\n{\n __asm__ volatile (\"reset\");\n}\n\nstatic void cmd_init_flash_buffer(void)\n{\n 
flash_init_write_buffer();\n}\n\nstatic void cmd_buffered_write(void)\n{\n uint8_t * p;\n char address_arg[HEX_WORD_STR_LEN] = {0};\n char value_arg[HEX_BYTE_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_FLASH);\n p += strlen(SET_FLASH);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint16_t address;\n uint8_t value;\n\n //\n // Parse address argument\n //\n while ((i != HEX_WORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n arg_error = arg_error || (i == 0);\n address = (uint16_t)strtol(address_arg, NULL, 16);\n\n //\n // Parse value argument\n //\n p += 1; // +1 for space\n i = 0;\n\n while ((i != HEX_BYTE_STR_LEN) && isxdigit(*p))\n {\n value_arg[i++] = *(p++);\n }\n\n value_arg[i] = NULL;\n arg_error = arg_error || (i == 0);\n value = (uint8_t)strtol(value_arg, NULL, 16);\n\n //\n // Perform flash write\n //\n if (!arg_error && (address < FLASH_MEM_SIZE))\n {\n flash_write_byte_to_buffer((flash_index_t)address, value);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void cmd_flush_buffer(void)\n{\n flash_write_buffer_to_flash();\n}\n\nstatic void cmd_lora_cw(void)\n{\n rfm95w_init();\n rfmw_send_cw();\n}\n\nstatic void cmd_lora_gps_broadcast(void)\n{\n p2pc_protocol_broadcast_gps_position();\n}\n\nstatic void cmd_lora_contiuous_rx(void)\n{\n rfm95w_start_continuous_rx();\n}\n\nstatic void cmd_gps_on_off_pulse(void)\n{\n jf2_io_send_on_pulse();\n}\n\nstatic void cmd_ext_flash_chip_erase(void)\n{\n ext_flash_chip_erase();\n}\n\nstatic void cmd_ext_flash_write_test(void)\n{\n uint8_t test_data[EXT_FLASH_PAGE_LENGTH];\n uint16_t i;\n\n for (i = 0; i != EXT_FLASH_PAGE_LENGTH; ++i)\n {\n test_data[i] = i;\n }\n\n ext_flash_program_page(test_data, 0);\n\n for (i = 0; i != EXT_FLASH_PAGE_LENGTH; ++i)\n {\n test_data[i] = (uint8_t)(i << 1);\n }\n\n ext_flash_program_page(test_data, 256);\n}\n\nstatic void cmd_ext_flash_set_page_buffer(void)\n{\n uint8_t * p;\n char address_arg[HEX_BYTE_STR_LEN] = {0};\n char value_arg[HEX_WORD_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, CMD_EXT_FLASH_SET_PAGE_BUFFER);\n p += strlen(CMD_EXT_FLASH_SET_PAGE_BUFFER);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint16_t address;\n uint16_t value;\n\n //\n // Parse address argument\n //\n while ((i != HEX_BYTE_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n address = (uint16_t)strtol(address_arg, NULL, 16);\n\n ++p; // +1 for space\n\n //\n // Parse value argument\n //\n while ((i != HEX_WORD_STR_LEN) && isxdigit(*p))\n {\n value_arg[i++] = *(p++);\n }\n\n value_arg[i] = NULL;\n value = (uint16_t)strtol(value_arg, NULL, 16);\n\n if (address >= EXT_FLASH_BUFFER_LEN)\n {\n arg_error = true;\n }\n else\n {\n ext_flash_page_buff[address] = value;\n }\n }\n}\n\nstatic void cmd_ext_flash_write_page(void)\n{\n uint8_t * p;\n char address_arg[HEX_DWORD_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, CMD_EXT_FLASH_WRITE_PAGE);\n p += strlen(CMD_EXT_FLASH_WRITE_PAGE);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint32_t address;\n\n //\n // Parse address argument\n //\n while ((i != HEX_DWORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n address = (uint32_t)strtol(address_arg, NULL, 16);\n\n //\n // Perform page write\n //\n if (address < 0x01000000)\n {\n 
ext_flash_program_page(ext_flash_page_buff, address);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void cmd_test_audio_session(void)\n{\n audio_start_playback_session(0);\n}\n\nstatic void cmd_write_audio_test_data(void)\n{\n uint16_t i;\n uint16_t k;\n uint16_t index;\n uint16_t page[128];\n \n ext_flash_chip_erase();\n\n for (i = 0; i != 128; ++i)\n {\n page[i] = 0x0000;\n }\n\n page[0] = 1; // number of tracks\n\n // Track header for track 0:\n page[1 + 0] = 0; // start address 256\n page[1 + 1] = 256;\n page[1 + 2] = 0x002f; // 4c400 samples (6100*512)\n page[1 + 3] = 0xa800;\n\n ext_flash_program_page(page, 0);\n\n for (i = 0; i != 2; ++i)\n {\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + k;\n page[index] = k * 1024;\n }\n\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + 32 + k;\n page[index] = (32 - k) * 1024;\n }\n }\n\n for (i = 0; i != 6100*4; ++i)\n {\n uint16_t page_number = (2 + i);\n uint32_t address = ((uint32_t)page_number) << 8;\n\n page[0] = i;\n ext_flash_program_page(page, address); \n }\n\n debug_log_append_line(\"Audio test data written!\");\n}\n\nstatic void cmd_reset_uart(void)\n{\n uart_deinit();\n uart_init();\n}\n\nstatic void get_flash(void)\n{\n uint8_t * p;\n char address_arg[HEX_WORD_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, GET_FLASH);\n p += strlen(GET_FLASH);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint16_t address;\n\n //\n // Parse address argument\n //\n while ((i != HEX_WORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n address = (uint16_t)strtol(address_arg, NULL, 16);\n\n //\n // Perform flash read\n //\n if (address < FLASH_MEM_SIZE)\n {\n uint8_t value;\n char ans[32];\n \n value = flash_read_byte((flash_index_t)address);\n sprintf(ans, \"%02X%s\", value, NEWLINE);\n uart_write_string(ans);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void get_ext_flash(void)\n{\n uint8_t * p;\n char address_arg[HEX_DWORD_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, GET_EXT_FLASH);\n p += strlen(GET_EXT_FLASH);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint32_t address;\n\n //\n // Parse address argument\n //\n while ((i != HEX_DWORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n address = (uint32_t)strtol(address_arg, NULL, 16);\n\n //\n // Perform flash read\n //\n if (address < 0x01000000)\n {\n uint8_t value;\n char ans[32];\n \n value = ext_flash_read_byte(address);\n sprintf(ans, \"%02X%s\", value, NEWLINE);\n uart_write_string(ans);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void get_page_ext_flash(void)\n{\n uint8_t * p;\n char address_arg[HEX_DWORD_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, GET_PAGE_EXT_FLASH);\n p += strlen(GET_PAGE_EXT_FLASH);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint32_t address;\n uint16_t page[128];\n\n //\n // Parse address argument\n //\n while ((i != HEX_DWORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n arg_error = arg_error || (i == 0);\n address = (uint32_t)strtol(address_arg, NULL, 16);\n\n //\n // Perform flash read\n //\n if (!arg_error && (address < 0x01000000))\n {\n uint16_t k = 0;\n\n ext_flash_read(&page[0], address, 256);\n\n sprintf(g_uart_string_buffer,\n \"\\r\\n\");\n uart_write_string(g_uart_string_buffer);\n\n 
for (k = 0; k != 16; ++k)\n {\n sprintf(g_uart_string_buffer,\n \"%04X %04X %04X %04X %04X %04X %04X %04X\\r\\n\",\n page[8 * k + 0],\n page[8 * k + 1],\n page[8 * k + 2],\n page[8 * k + 3],\n page[8 * k + 4],\n page[8 * k + 5],\n page[8 * k + 6],\n page[8 * k + 7]);\n uart_write_string(g_uart_string_buffer);\n\n while (!uart_is_write_buffer_empty()){;}\n }\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void get_gps_status(void)\n{\n nmea_print_status();\n}\n\nstatic void get_orientation(void)\n{\n accelerometer_output_t xyz;\n char s[32] = {0};\n\n accelerometer_get_orientation(&xyz);\n\n sprintf(s, \"\\tx: %u\", xyz.x);\n uart_write_string(s);\n sprintf(s, \"\\r\\n\\ty: %u\", xyz.y);\n uart_write_string(s);\n sprintf(s, \"\\r\\n\\tz: %u\\r\\n\", xyz.z);\n uart_write_string(s);\n}\n\nstatic void get_lora_address(void)\n{\n char s[32] = {0};\n uint32_t address = flash_read_dword(FLASH_INDEX_LORA_ADDRESS_MSB);\n\n sprintf(s, \"\\t%04X%04X\\r\\n\",\n (uint16_t)(address >> 16),\n (uint16_t)address);\n uart_write_string(s);\n}\n\nstatic void set_flash(void)\n{\n uint8_t * p;\n char address_arg[HEX_WORD_STR_LEN] = {0};\n char value_arg[HEX_BYTE_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_FLASH);\n p += strlen(SET_FLASH);\n p += 1; // +1 for space\n\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t i = 0;\n uint16_t address;\n uint8_t value;\n\n //\n // Parse address argument\n //\n while ((i != HEX_WORD_STR_LEN) && isxdigit(*p))\n {\n address_arg[i++] = *(p++);\n }\n\n address_arg[i] = NULL;\n arg_error = arg_error || (i == 0);\n address = (uint16_t)strtol(address_arg, NULL, 16);\n\n //\n // Parse value argument\n //\n p += 1; // +1 for space\n i = 0;\n \n while ((i != HEX_BYTE_STR_LEN) && isxdigit(*p))\n {\n value_arg[i++] = *(p++);\n }\n\n value_arg[i] = NULL;\n arg_error = arg_error || (i == 0);\n value = (uint8_t)strtol(value_arg, NULL, 16);\n \n //\n // Perform flash write\n //\n if (!arg_error && (address < FLASH_MEM_SIZE))\n {\n flash_init_write_buffer();\n flash_write_byte_to_buffer((flash_index_t)address, value);\n flash_write_buffer_to_flash();\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void set_gps_echo(void)\n{\n uint8_t * p;\n\n p = (uint8_t*)strstr(cmd_buffer, SET_GPS_ECHO);\n p += strlen(SET_GPS_ECHO);\n p += 1; // +1 for space\n\n if (('o' == *p) && ('n' == *(p + 1)))\n {\n jf2_uart_enable_debug_uart_echo(true);\n } else if (('o' == *p) && ('f' == *(p + 1)) && ('f' == *(p + 2)))\n {\n jf2_uart_enable_debug_uart_echo(false);\n }\n else\n {\n arg_error = true;\n }\n}\n\nstatic void set_lora_bandwidth(void)\n{\n uint8_t * p;\n char bandwidth_arg[2] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_LORA_BANDWIDTH);\n p += strlen(SET_LORA_BANDWIDTH);\n p += 1; // +1 for space\n\n if (!isdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t bandwidth;\n bandwidth_arg[0] = *p;\n\n bandwidth = (uint8_t)strtol(bandwidth_arg, NULL, 10);\n\n if ((bandwidth >= RFM95W_BW_7K8) && (bandwidth <= RFM95W_BW_500K))\n {\n rfm95w_io_set_bandwidth((rfm95w_modem_cfg_bw_t)bandwidth);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void set_lora_coding_rate(void)\n{\n uint8_t * p;\n char coding_rate_arg[2] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_LORA_CODING_RATE);\n p += strlen(SET_LORA_CODING_RATE);\n p += 1; // +1 for space\n\n if (!isdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t coding_rate;\n coding_rate_arg[0] = *p;\n\n coding_rate = (uint8_t)strtol(coding_rate_arg, NULL, 10);\n\n if ((coding_rate >= 
RFM95W_CODING_RATE_4_5) &&\n (coding_rate <= RFM95W_CODING_RATE_4_8))\n {\n rfm95w_io_set_coding_rate((rfm95w_coding_rate_t)coding_rate);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void set_lora_spreading_factor(void)\n{\n uint8_t * p;\n char spreading_factor_arg[3] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_LORA_SPREADING_FACTOR);\n p += strlen(SET_LORA_SPREADING_FACTOR);\n p += 1; // +1 for space\n\n if (!isdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t spreading_factor;\n spreading_factor_arg[0] = *p;\n ++p;\n\n if (isdigit(*p))\n {\n spreading_factor_arg[1] = *p;\n }\n\n spreading_factor = (uint8_t)strtol(spreading_factor_arg, NULL, 10);\n\n if ((spreading_factor >= RFM95W_SPREADING_FACTOR_64_CHIPS) &&\n (spreading_factor <= RFM95W_SPREADING_FACTOR_4096_CHIPS))\n {\n rfm95w_io_set_speading_factor((rfm95w_spreading_factor_t)spreading_factor);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void set_lora_frequency_band(void)\n{\n uint8_t * p;\n char frequency_arg[2] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_LORA_FREQUENCY);\n p += strlen(SET_LORA_FREQUENCY);\n p += 1; // +1 for space\n\n if (!isdigit(*p))\n {\n arg_error = true;\n }\n else\n {\n uint8_t frequency;\n frequency_arg[0] = *p;\n\n frequency = (uint8_t)strtol(frequency_arg, NULL, 10);\n\n if ((frequency >= RFM95W_CHANNEL_FREQUENCY_868_1) &&\n (frequency <= RFM95W_CHANNEL_FREQUENCY_867_9))\n {\n rfm95w_io_set_frequency((rfm95w_channel_frequency_t)frequency);\n }\n else\n {\n arg_error = true;\n }\n }\n}\n\nstatic void set_sleep_allowed(void)\n{\n uint8_t * p;\n\n p = (uint8_t*)strstr(cmd_buffer, SET_SLEEP_ALLOWED);\n p += strlen(SET_SLEEP_ALLOWED);\n p += 1; // +1 for space\n\n if (('o' == *p) && ('n' == *(p + 1)))\n {\n is_sleep_allowed = true;\n }\n else if (('o' == *p) && ('f' == *(p + 1)) && ('f' == *(p + 2)))\n {\n is_sleep_allowed = false;\n }\n else\n {\n arg_error = true;\n }\n}\n\nstatic void set_debug_log_enable(void)\n{\n uint8_t * p;\n\n p = (uint8_t*)strstr(cmd_buffer, SET_DEBUG_LOG_ENABLE);\n p += strlen(SET_DEBUG_LOG_ENABLE);\n p += 1; // +1 for space\n\n if (('o' == *p) && ('n' == *(p + 1)))\n {\n debug_log_enable(true);\n }\n else if (('o' == *p) && ('f' == *(p + 1)) && ('f' == *(p + 2)))\n {\n debug_log_enable(false);\n }\n else\n {\n arg_error = true;\n } \n}\n\nstatic void set_lora_to_p2ps(void)\n{\n flash_init_write_buffer();\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_P2PS_NOT_P2PC, true);\n flash_write_buffer_to_flash();\n}\n\nstatic void set_lora_to_p2pc(void)\n{\n flash_init_write_buffer();\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_P2PS_NOT_P2PC, false);\n flash_write_buffer_to_flash();\n}\n\nstatic void set_lora_address(void)\n{\n uint8_t * p;\n uint8_t i = 0;\n char address_arg[HEX_BYTE_STR_LEN] = {0};\n\n p = (uint8_t*)strstr(cmd_buffer, SET_LORA_ADDRESS);\n p += strlen(SET_LORA_ADDRESS);\n p += 1; // +1 for space\n\n for (i = 0; i != 8; ++i)\n {\n if (!isxdigit(*p))\n {\n arg_error = true;\n }\n }\n\n if (!arg_error)\n {\n uint32_t address = 0x00000000;\n\n //\n // Parse address argument\n //\n i = 0;\n address_arg[i++] = *(p++);\n address_arg[i++] = *(p++);\n address_arg[i++] = 0;\n address = ((uint32_t)strtol(address_arg, NULL, 16)) << 24;\n\n i = 0;\n address_arg[i++] = *(p++);\n address_arg[i++] = *(p++);\n address_arg[i++] = 0;\n address |= ((uint32_t)strtol(address_arg, NULL, 16)) << 16;\n\n i = 0;\n address_arg[i++] = *(p++);\n address_arg[i++] = *(p++);\n address_arg[i++] = 0;\n address |= ((uint32_t)strtol(address_arg, NULL, 16)) << 8;\n\n 
i = 0;\n address_arg[i++] = *(p++);\n address_arg[i++] = *(p++);\n address_arg[i++] = 0;\n address |= ((uint32_t)strtol(address_arg, NULL, 16)) << 0;\n\n flash_init_write_buffer();\n flash_write_dword_to_buffer(FLASH_INDEX_LORA_ADDRESS_MSB, address);\n flash_write_buffer_to_flash();\n }\n}\n\n" }, { "alpha_fraction": 0.3511543273925781, "alphanum_fraction": 0.35236936807632446, "avg_line_length": 25.967212677001953, "blob_id": "7a13aac451808b451867303d9ac64c641857d1dd", "content_id": "838457652424fcc59674cd75c901c9d896da6f0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1646, "license_type": "no_license", "max_line_length": 80, "num_lines": 61, "path": "/life_jacket.X/inc/gps/gps.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef GPS_H\n#define GPS_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n#include \"gps/nmea.h\"\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n#define GPS_BROADCAST_INTERVAL_SEC (30)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n * @brief Initializes the GPS receiver.\n */\nvoid gps_init(void);\n\n/**\n * @brief Should be polled from the main loop.\n */\nvoid gps_poll(void);\n\n/**\n * @brief Checks if the GPS receiver allows the MCU to enter sleep mode.\n * @return True if the MCU can enter sleep.\n */\nbool gps_allows_sleep_mode(void);\n\n/**\n @Brief Gets the current GPS coordinates.\n @return Pointer to a GPS coordinates structure.\n*/\nconst nmea_coordinates_info_t * gps_get_coordinates(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* GPS_H */\n\n" }, { "alpha_fraction": 0.36434462666511536, "alphanum_fraction": 0.3913840651512146, "avg_line_length": 27.324674606323242, "blob_id": "9d86eac41cefdf5d73a080adce838c991ec93c98", "content_id": "8bea9b2ccf3e042179f80a91d0889803ca207524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2182, "license_type": "no_license", "max_line_length": 80, "num_lines": 77, "path": "/life_jacket.X/inc/lora/p2p_protocol.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef P2P_PROTOCOL_H\n#define P2P_PROTOCOL_H\n\n/*\nThis file handles the Point to Point (P2P) protocol.\n*/\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n \n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public 
type definitions\n// =============================================================================\n\ntypedef enum\n{\n P2P_DATA_TYPE_ACK = 0x01,\n P2P_DATA_TYPE_GPS_POSITION = 0x02\n} p2p_data_type_t;\n\ntypedef struct p2p_frame_header_t\n{\n uint32_t source_address;\n uint32_t destination_address;\n uint8_t frame_number;\n uint8_t time_to_live;\n uint8_t protocol;\n p2p_data_type_t data_type;\n} p2p_frame_header_t;\n\ntypedef enum\n{\n P2P_INDEX_SOURCE = 0,\n P2P_INDEX_DESTINATION = 4,\n P2P_INDEX_TIME_TO_LIVE = 8,\n P2P_INDEX_FRAME_NUMBER = 9,\n P2P_INDEX_PROTOCOL = 10,\n P2P_INDEX_DATA_TYPE = 11,\n P2P_INDEX_APPLICATION = 12,\n} p2p_message_index_t;\n\ntypedef enum\n{\n P2P_GPS_INDEX_LATITUDE_DEG = 0,\n P2P_GPS_INDEX_LONGITUDE_DEG = 2,\n P2P_GPS_INDEX_LATITUDE_MINUTES = 4,\n P2P_GPS_INDEX_LONGITUDE_MINUTES = 8,\n P2P_GPS_INDEX_TOF_HOURS = 12,\n P2P_GPS_INDEX_TOF_MINUTES = 13,\n P2P_GPS_INDEX_TOD_SECONDS = 14\n} p2p_gps_message_index_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* P2P_PROTOCOL_H */\n\n" }, { "alpha_fraction": 0.515870988368988, "alphanum_fraction": 0.5303225517272949, "avg_line_length": 29.503936767578125, "blob_id": "b5f4dd2cdbcc6614df1bc76d81b12aa0f8501c90", "content_id": "2bb1793477c4575a68931780bc166fc87c4aab1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3875, "license_type": "no_license", "max_line_length": 80, "num_lines": 127, "path": "/life_jacket.X/inc/hal/flash.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\n * This file allows one to use the flash memory as non volatile data memory.\n *\n * The data memory is 1536 bytes long and resides in the instruction\n * memory space. In order to change one byte, the whole 1536 bytes sector has\n * to be erased and reprogramed. 
Therefore writes should be done together as\n * much as possible in order to minimize wear.\n *\n * Write operations are slow and will stall the whole processor.\n */\n\n\n#ifndef FLASH_H\n#define\tFLASH_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdbool.h>\n#include <stdint.h>\n \n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef enum\n{\n FLASH_INDEX_WDT_RESETS = 0x00,\n // LORA parameters\n FLASH_INDEX_LORA_PARAMS_INITIALIZED = 0x01,\n FLASH_INDEX_LORA_BANDWIDTH = 0x02,\n FLASH_INDEX_LORA_CODING_RATE = 0x03,\n FLASH_INDEX_LORA_SPREADING_FACTOR = 0x04,\n FLASH_INDEX_LORA_FREQUENCY = 0x05,\n FLASH_INDEX_LORA_ADDRESS_MSB = 0x06,\n FLASH_INDEX_LORA_ADDRESS_HIGH_MID = 0x07,\n FLASH_INDEX_LORA_ADDRESS_LOW_MID = 0x08,\n FLASH_INDEX_LORA_ADDRESS_LSB = 0x09,\n FLASH_INDEX_LORA_P2PS_NOT_P2PC = 0x0A,\n\n} flash_index_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n\n// =============================================================================\n// Global constatants\n// =============================================================================\n\n\n// Number of bytes the can be stored in the data memory.\n#define FLASH_MEM_SIZE 1024\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n * @brief Initializes the flash by setting any missing values to their default.\n */\nvoid flash_init(void);\n\n/**\n * @brief Reads one byte from the flash memory.\n * @param index Index to read from.\n * @return the read value.\n */\nuint8_t flash_read_byte(flash_index_t index);\n\n/**\n * @brief Reads one word from the flash memory.\n * @param index Index to read from.\n * @return the read value.\n */\nuint16_t flash_read_word(flash_index_t index);\n\n/**\n * @brief Reads one double word from the flash memory.\n * @param index Index to read from.\n * @return the read value.\n */\nuint32_t flash_read_dword(flash_index_t index);\n\n/**\n * @brief Makes a RAM copy of the flash data memory.\n */\nvoid flash_init_write_buffer(void);\n\n/**\n * @brief Updates one byte in the data memory buffer.\n * @param index Index to the byte to modify.\n * @param data The byte to write.\n */\nvoid flash_write_byte_to_buffer(flash_index_t index, uint8_t data);\n\n/**\n * @brief Updates one word in the data memory buffer.\n * @param index Index to the word to modify.\n * @param data The word to write.\n */\nvoid flash_write_word_to_buffer(flash_index_t index, uint16_t data);\n\n/**\n * @brief Updates one double word in the data memory buffer.\n * @param index Index to the dword to modify.\n * @param data The dword to write.\n */\nvoid flash_write_dword_to_buffer(flash_index_t index, uint32_t data);\n\n/**\n * @brief Writes the data memory buffer to the flash memory.\n * @details This function is blocking and will stall the entire cpu\n * for up to a few milliseconds.\n */\nvoid flash_write_buffer_to_flash(void);\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* FLASH_H 
*/\n\n" }, { "alpha_fraction": 0.3745139241218567, "alphanum_fraction": 0.3810948133468628, "avg_line_length": 24.51908302307129, "blob_id": "6c306c1e64aeebefe9692ec8692fe15c2fc130cc", "content_id": "025d5bd3f0122401861d8a7f3dc721eb009a5633", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3343, "license_type": "no_license", "max_line_length": 80, "num_lines": 131, "path": "/life_jacket.X/src/gps/nmea_queue.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <string.h>\n#include <stdint.h>\n#include <stdbool.h>\n\n#include \"gps/nmea_queue.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n#define NMEA_QUEUE_SIZE (5)\n\nstruct nmea_queue_struct_t\n{\n char messages[NMEA_QUEUE_SIZE][NMEA_MAX_MESSAGE_LENGTH];\n uint16_t message_lengths[NMEA_QUEUE_SIZE];\n uint16_t first;\n uint16_t last;\n uint16_t size;\n};\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n/*\n FIFO queue, append at the \"last\" index.\n*/\nstatic struct nmea_queue_struct_t rx_queue;\nstatic struct nmea_queue_struct_t tx_queue;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid nmea_queue_init(nmea_queue_t queue)\n{\n memset((void*)queue, 0, sizeof(struct nmea_queue_struct_t));\n}\n\nnmea_queue_t nmea_queue_get_rx_queue(void)\n{\n return &rx_queue;\n}\n\nnmea_queue_t nmea_queue_get_tx_queue(void)\n{\n return &tx_queue;\n}\n\nvoid nmea_queue_append(nmea_queue_t queue,\n char * message,\n uint16_t length)\n{\n if (NMEA_QUEUE_SIZE == queue->size)\n {\n return; // queue is full, throw this message away\n }\n\n if (length > NMEA_MAX_MESSAGE_LENGTH)\n {\n return; // message is to long, throw this message away\n }\n\n if (queue->size)\n {\n queue->last += 1;\n }\n\n if (NMEA_QUEUE_SIZE == queue->last)\n {\n queue->last = 0;\n }\n\n memcpy(queue->messages[queue->last], message, length);\n queue->message_lengths[queue->last] = length;\n\n queue->size += 1;\n}\n\nuint16_t nmea_queue_size(nmea_queue_t queue)\n{\n return queue->size;\n}\n\nchar * nmea_queue_peek(nmea_queue_t queue)\n{\n return queue->messages[queue->first];\n}\n\nuint16_t nmea_queue_peek_length(nmea_queue_t queue)\n{\n return queue->message_lengths[queue->first];\n}\n\nvoid nmea_queue_pop(nmea_queue_t queue)\n{\n if (!queue->size)\n {\n return; // Queue is already empty\n }\n\n 
if (1 != queue->size)\n {\n queue->first += 1;\n }\n\n queue->size -= 1;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n" }, { "alpha_fraction": 0.4588877558708191, "alphanum_fraction": 0.48038211464881897, "avg_line_length": 24.701753616333008, "blob_id": "48922b4a3340a026462f0a3ff18c65664b806893", "content_id": "4b552a0ffab149fb78e18f2f1e77842ba705a91f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2931, "license_type": "no_license", "max_line_length": 80, "num_lines": 114, "path": "/life_jacket.X/inc/lora/rfm95w.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef RFM95W_H\n#define RFM95W_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n#define RFM95W_RX_BUFFER_SIZE (256)\n\ntypedef struct rfm95w_buffer_t\n{\n uint8_t data[RFM95W_RX_BUFFER_SIZE];\n uint8_t length;\n} rfm95w_buffer_t;\n\ntypedef struct rfm95w_ack_parameters_t\n{\n bool send_ack;\n bool wait_for_ack;\n\n bool was_valid_ack;\n} rfm95w_ack_parameters_t;\n\n\ntypedef void (*rfmw95w_received_message_callback_t)(\n const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr,\n rfm95w_ack_parameters_t * ack_parameters,\n rfm95w_buffer_t * ack);\n\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n#define RFM95W_MAX_RETRANSMISSION_COUNT (3)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Initializes the RFM95W module.\n*/\nvoid rfm95w_init(void);\n\n/**\n * @brief Registers a callback for handling received messages.\n * @param callback - Callback to register.\n */\nvoid rfm95w_register_received_message_callback(\n rfmw95w_received_message_callback_t callback);\n\n/**\n * @brief Checks if the RFM95W is in an idle state.\n * @return True if in idle.\n */\nbool rfm95w_is_idle(void);\n\n/**\n * @brief Appends data to the TX FIFO.\n * @param data - Data to append to the FIFO.\n * @param length - Number of bytes to append to the FIFO.\n * @param offset - Number of bytes from tx fifo base address to start\n * writing at.\n */\nvoid rfm95w_write_tx_fifo(const uint8_t * data,\n uint8_t length,\n uint8_t offset);\n\n/**\n * @brief Clears the TX FIFO.\n */\nvoid rfm95w_clear_tx_fifo(void);\n\n/**\n * @brief Starts a TX session.\n */\nvoid rfm95w_start_tx(uint8_t max_retransmissions, bool wait_for_ack);\n\n/**\n * @brief Starts a RX session which ends at complete packet reception\n * or after the rx time window has passed.\n */\nvoid rfm95w_start_single_rx(void);\n\n/**\n * 
@brief Starts a continuous RX session.\n */\nvoid rfm95w_start_continuous_rx(void);\n\nvoid rfmw_send_cw(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* RFM95W_H */\n\n" }, { "alpha_fraction": 0.5176995992660522, "alphanum_fraction": 0.5559535026550293, "avg_line_length": 26.76224708557129, "blob_id": "e232b4e0245d6b1371e231288fe43de3cfcccc32", "content_id": "3126005a7873c8d3942390aaa8306d7578fe8642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 19266, "license_type": "no_license", "max_line_length": 90, "num_lines": 694, "path": "/life_jacket.X/src/lora/rfm95w.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"lora\\rfm95w.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"lora/rfm95w_io.h\"\n#include \"uart/debug_log.h\"\n#include \"hal/uart.h\"\n#include \"hal/gpio.h\"\n#include \"hal/clock.h\"\n#include \"hal/flash.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\ntypedef enum\n{\n RFM95W_RADIO_STATE_IDLE,\n RFM95W_RADIO_STATE_TX,\n RFM95W_RADIO_STATE_RX_SINGLE,\n RFM95W_RADIO_STATE_RX_CONTINUOUS\n} rfm95w_radio_state_t;\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\nstatic const uint8_t RFM95W_REG_INIT_TABLE[][2] =\n{\n {RFM95W_REG_PA_CONFIG, 0xCC}, // Output power = 14dBm\n {RFM95W_REG_PA_RAMP, 0x09}, // Default value (40 us)\n {RFM95W_REG_LNA, 0x20}, // Max LNA gain\n {RFM95W_REG_FIFO_ADDR_PTR, 0x00},\n {RFM95W_REG_FIFO_TX_BASE_ADDR, 0x80},\n {RFM95W_REG_FIFO_RX_BASE_ADDR, 0x00},\n {RFM95W_REG_IRQ_FLAGS_MASK, 0x00},\n {RFM95W_REG_IRQ_FLAGS, 0xFF}, // Clear by writing 1's\n {RFM95W_REG_MODEM_CONFIG1, (RFM95W_BW_125K << 4) |\n (RFM95W_CODING_RATE_4_5 << 1) |\n RFM95W_EXPLICIT_HEADER_MODE},\n {RFM95W_REG_MODEM_CONFIG2, (RFM95W_SPREADING_FACTOR_128_CHIPS << 4) |\n (RFM95W_TX_NORMAL_MODE << 3) |\n (RFM95W_PAYLOAD_CRC_DISABLE << 2) |\n (0 << 0)}, // rx time-out msb\n {RFM95W_REG_SYMB_TIMEOUT_LSB, 0x40},\n {RFM95W_REG_PREAMBLE_MSB, 0x00},\n {RFM95W_REG_PREAMBLE_LSB, 0x08},\n {RFM95W_REG_PAYLOAD_LENGTH, 0x01},\n {RFM95W_REG_MAX_PAYLOAD_LENGTH, 0xFF},\n {RFM95W_REG_HOP_PERIOD, 0x00}, // Frequency hopping disabled\n {RFM95W_REG_MODEM_CONFIG3, 0x00}, // Data rate is under 16ms, LNA \n // gain set by LnaGain regsister\n};\n\n#define RFM95W_REG_INIT_TABLE_LEN (sizeof(RFM95W_REG_INIT_TABLE) / 2)\n\nstatic const uint8_t RFM95W_SILICON_VERSION = 0x12;\n\nstatic const uint16_t SINGLE_RX_TIMEOUT_SYMBOLS = 1023; // must be in [4, 1023]\n\nstatic const uint8_t RFM95W_ACK_RETRANSMISSION_COUNT = 1;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic rfmw95w_received_message_callback_t received_message_callback;\n\nstatic volatile uint8_t 
tx_fifo_size = 0;\n\nstatic volatile rfm95w_radio_state_t radio_state = RFM95W_RADIO_STATE_IDLE;\nstatic volatile uint8_t rfm95w_retransmission_count;\nstatic volatile bool wait_for_ack;\n\nstatic volatile rfm95w_buffer_t rx_buffer;\nstatic volatile rfm95w_buffer_t tx_buffer;\n\nstatic bool contiuous_mode = false;\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n//\n// Event handlers\n//\n\n/**\n * @brief Handles the TX done event.\n */\nstatic void handle_tx_done(void);\n\n/**\n * @brief Handles the RX done event.\n */\nstatic void handle_rx_done(void);\n\n/**\n * @brief Handles the RX done event while in contiuous RX mode.\n */\nstatic void handle_continuous_rx_packet(void);\n\n/**\n * @brief Handles the RX timeout event.\n */\nstatic void handle_rx_timeout(void);\n\n//\n//\n//\n\n/**\n * @brief Reads the contents of the RX fifo into RAM.\n */\nstatic void rfm95w_read_fifo(void);\n\n/**\n * @brief Ends a RX session.\n */\nstatic void rfm95w_end_rx(void);\n\n/**\n * @brief Ends a TX session.\n */\nstatic void rfm95w_end_tx(void);\n\n/**\n * @brief Starts a retransmission.\n */\nstatic void rfm95w_start_retransmission(void);\n\n/**\n * @brief Fills up the TX fifo for a retransmission.\n */\nstatic void rfm95w_refill_tx_fifo(void);\n\n/**\n * @Brief Configures the channel settings according to the data stored\n * in the flash memory.\n */\nstatic void rfm95w_write_settings_from_flash(void);\n\n/**\n * @brief Configures DIO0 to generate an interrupt when the message has been\n * transitted.\n */\nstatic void rfm95w_setup_dio0_for_tx_done(void);\n\n/**\n * @brief Configures DIO0 to generate an interrupt when a message has been\n * received.\n */\nstatic void rfm95w_setup_dio0_for_rx_done(void);\n\n/**\n * @brief Configures DIO1 ti generate an interrupt when a fixed time\n * RX session times out.\n */\nstatic void rfm95w_setup_dio1_for_rx_timeout(void);\n\n//\n// DIO interrupts\n//\n\n/**\n * @brief Callback for the DIO0 change notification callback.\n */\nstatic void rfmw_dio0_callback(bool rising);\n\n/**\n * @brief Callback for the DIO1 change notification callback.\n */\nstatic void rfmw_dio1_callback(bool rising);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid rfm95w_init(void)\n{\n uint8_t i;\n\n radio_state = RFM95W_RADIO_STATE_IDLE;\n rx_buffer.length = 0;\n received_message_callback = NULL;\n rfm95w_retransmission_count = 0;\n wait_for_ack = false;\n memset((void*)&rx_buffer, sizeof(rfm95w_buffer_t), 0);\n memset((void*)&tx_buffer, sizeof(rfm95w_buffer_t), 0);\n\n while (clock_get_msec() <= 10)\n {\n ; // The user should wait for 10 ms from of the end of the POR cycle\n // before commencing communications over the SPI bus\n }\n\n LORA_RESET_OFF;\n\n if (RFM95W_SILICON_VERSION == rfm95w_io_read(RFM95W_REG_VERSION))\n {\n debug_log_append_line(\"RFM95W communication ok\");\n }\n else\n {\n debug_log_append_line(\"RFM95W communication failiure\");\n return;\n }\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_SLEEP);\n\n // LORA mode, high frequency mode\n rfm95w_io_write(RFM95W_REG_OP_MODE, 0x80);\n\n for (i = 0; i != RFM95W_REG_INIT_TABLE_LEN; ++i)\n {\n rfm95w_io_write(RFM95W_REG_INIT_TABLE[i][0],\n RFM95W_REG_INIT_TABLE[i][1]);\n }\n\n rfm95w_write_settings_from_flash();\n\n 
rfm95w_io_set_operating_mode(RFM95W_OP_MODE_STAND_BY);\n}\n\nvoid rfm95w_register_received_message_callback(\n rfmw95w_received_message_callback_t callback)\n{\n received_message_callback = callback;\n}\n\nbool rfm95w_is_idle(void)\n{\n return (RFM95W_RADIO_STATE_IDLE == radio_state);\n}\n\nvoid rfm95w_write_tx_fifo(const uint8_t * data,\n uint8_t length,\n uint8_t offset)\n{\n uint8_t tx_fifo_base;\n uint8_t i;\n\n tx_fifo_base = rfm95w_io_read(RFM95W_REG_FIFO_TX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, tx_fifo_base + offset);\n\n for (i = 0; i != length; ++i)\n {\n rfm95w_io_write(RFM95W_REG_FIFO, *data);\n tx_buffer.data[offset + i] = *data;\n\n ++data;\n }\n\n if (offset + length > tx_fifo_size)\n {\n tx_fifo_size = offset + length;\n\n rfm95w_io_write(RFM95W_REG_PAYLOAD_LENGTH, tx_fifo_size);\n tx_buffer.length = tx_fifo_size;\n }\n}\n\nvoid rfm95w_clear_tx_fifo(void)\n{\n tx_fifo_size = 0;\n rfm95w_io_write(RFM95W_REG_PAYLOAD_LENGTH, 1);\n}\n\nvoid rfm95w_start_tx(uint8_t max_retransmissions, bool iwait_for_ack)\n{\n rfm95w_io_clear_all_irqs();\n rfm95w_setup_dio0_for_tx_done();\n\n radio_state = RFM95W_RADIO_STATE_TX;\n rfm95w_retransmission_count = max_retransmissions - 1;\n wait_for_ack = iwait_for_ack;\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_TX);\n}\n\nvoid rfm95w_start_single_rx(void)\n{\n uint8_t rx_base_addr;\n\n rx_base_addr = rfm95w_io_read(RFM95W_REG_FIFO_RX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, rx_base_addr);\n\n rfm95w_io_set_single_rx_timeout(SINGLE_RX_TIMEOUT_SYMBOLS);\n\n rfm95w_io_clear_all_irqs();\n rfm95w_setup_dio0_for_rx_done();\n rfm95w_setup_dio1_for_rx_timeout();\n\n radio_state = RFM95W_RADIO_STATE_RX_SINGLE;\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_RX_SINGLE);\n}\n\nvoid rfm95w_start_continuous_rx(void)\n{\n uint8_t rx_base_addr;\n\n rx_base_addr = rfm95w_io_read(RFM95W_REG_FIFO_RX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, rx_base_addr);\n\n rfm95w_io_set_single_rx_timeout(SINGLE_RX_TIMEOUT_SYMBOLS);\n\n rfm95w_io_clear_all_irqs();\n rfm95w_setup_dio0_for_rx_done();\n\n radio_state = RFM95W_RADIO_STATE_RX_CONTINUOUS;\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_RX_CONT);\n\n contiuous_mode = true;\n}\n\nvoid rfmw_send_cw(void)\n{\n uint8_t modem_config_2;\n\n modem_config_2 = rfm95w_io_read(RFM95W_REG_MODEM_CONFIG2);\n modem_config_2 |= RFM95W_TX_CONTINUOUS_MODE << 3;\n rfm95w_io_write(RFM95W_REG_MODEM_CONFIG2, modem_config_2);\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_TX);\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void handle_tx_done(void)\n{\n if (wait_for_ack)\n {\n rfm95w_start_single_rx();\n }\n else\n {\n if (0 != rfm95w_retransmission_count)\n {\n --rfm95w_retransmission_count;\n rfm95w_end_tx();\n rfm95w_start_retransmission();\n }\n else\n {\n if (contiuous_mode)\n {\n rfm95w_start_continuous_rx();\n }\n }\n }\n}\n\nstatic void handle_rx_done(void)\n{\n uint8_t reg_irq_flags;\n\n reg_irq_flags = rfm95w_io_read(RFM95W_REG_IRQ_FLAGS);\n\n rfm95w_io_clear_all_irqs();\n\n if (reg_irq_flags & RFM95W_IRQ_FLAG_PAYLOAD_CRC_ERROR_MASK)\n {\n handle_rx_timeout();\n }\n else\n {\n int16_t rssi;\n uint8_t snr;\n\n rfm95w_read_fifo();\n\n rssi = -137 + (int16_t)rfm95w_io_read(RFM95W_REG_PKT_RSSI_VALUE);\n snr = rfm95w_io_read(RFM95W_REG_PKT_SNR_VALUE);\n\n rfm95w_end_rx();\n\n if (NULL != received_message_callback)\n {\n 
rfm95w_ack_parameters_t ack_parameters;\n\n received_message_callback((uint8_t*)rx_buffer.data,\n rx_buffer.length,\n rssi,\n snr,\n &ack_parameters,\n (rfm95w_buffer_t*)&tx_buffer);\n\n if (ack_parameters.send_ack)\n {\n rfm95w_clear_tx_fifo();\n rfm95w_write_tx_fifo((const uint8_t *)tx_buffer.data,\n tx_buffer.length,\n 0);\n\n rfm95w_start_tx(RFM95W_ACK_RETRANSMISSION_COUNT,\n ack_parameters.wait_for_ack);\n }\n else if (!ack_parameters.was_valid_ack)\n {\n handle_rx_timeout();\n }\n else if (contiuous_mode)\n {\n rfm95w_start_continuous_rx();\n }\n }\n else\n {\n handle_rx_timeout();\n }\n }\n}\n\nstatic void handle_continuous_rx_packet(void)\n{\n uint8_t reg_irq_flags;\n uint8_t rx_base_addr;\n\n reg_irq_flags = rfm95w_io_read(RFM95W_REG_IRQ_FLAGS);\n\n rx_base_addr = rfm95w_io_read(RFM95W_REG_FIFO_RX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, rx_base_addr);\n\n rfm95w_io_clear_all_irqs();\n\n if (reg_irq_flags & RFM95W_IRQ_FLAG_PAYLOAD_CRC_ERROR_MASK)\n {\n debug_log_append_line(\"CRC error on received packet in contiuous rx.\");\n }\n else\n {\n int16_t rssi;\n uint8_t snr;\n\n rfm95w_read_fifo();\n\n rssi = -137 + (int16_t)rfm95w_io_read(RFM95W_REG_PKT_RSSI_VALUE);\n snr = rfm95w_io_read(RFM95W_REG_PKT_SNR_VALUE);\n\n if (NULL != received_message_callback)\n {\n rfm95w_ack_parameters_t ack_parameters;\n\n received_message_callback((uint8_t*)rx_buffer.data,\n rx_buffer.length,\n rssi,\n snr,\n &ack_parameters,\n (rfm95w_buffer_t*)&tx_buffer);\n\n if (ack_parameters.send_ack)\n {\n rfm95w_end_rx();\n rx_base_addr = rfm95w_io_read(RFM95W_REG_FIFO_RX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, rx_base_addr);\n rfm95w_io_clear_all_irqs();\n\n rfm95w_clear_tx_fifo();\n rfm95w_write_tx_fifo((const uint8_t *)tx_buffer.data,\n tx_buffer.length,\n 0);\n\n rfm95w_start_tx(RFM95W_ACK_RETRANSMISSION_COUNT,\n ack_parameters.wait_for_ack);\n }\n }\n }\n}\n\nstatic void handle_rx_timeout(void)\n{\n if (0 != rfm95w_retransmission_count)\n {\n --rfm95w_retransmission_count;\n rfm95w_start_retransmission();\n }\n else\n {\n rfm95w_end_rx();\n\n if (contiuous_mode)\n {\n rfm95w_start_continuous_rx();\n }\n }\n}\n\nstatic void rfm95w_read_fifo(void)\n{\n uint8_t length;\n uint8_t i;\n uint8_t fifo_rx_current_address;\n\n length = rfm95w_io_read(RFM95W_REG_RX_NBR_BYTES);\n fifo_rx_current_address = rfm95w_io_read(RFM95W_REG_FIFO_RX_CURRENT_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, fifo_rx_current_address);\n\n for (i = 0; i != length; ++i)\n {\n rx_buffer.data[i] = rfm95w_io_read(RFM95W_REG_FIFO);\n }\n\n rx_buffer.length = length;\n}\n\nstatic void rfm95w_end_rx(void)\n{\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO0, false);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO1, false);\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_STAND_BY);\n radio_state = RFM95W_RADIO_STATE_IDLE;\n}\n\nstatic void rfm95w_end_tx(void)\n{\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO0, false);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO1, false);\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_STAND_BY);\n radio_state = RFM95W_RADIO_STATE_IDLE;\n}\n\nstatic void rfm95w_start_retransmission(void)\n{\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO0, false);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO1, false);\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_STAND_BY);\n\n rfm95w_io_clear_all_irqs();\n rfm95w_setup_dio0_for_tx_done();\n\n radio_state = RFM95W_RADIO_STATE_TX;\n\n rfm95w_refill_tx_fifo();\n\n rfm95w_io_set_operating_mode(RFM95W_OP_MODE_TX);\n}\n\nstatic void rfm95w_refill_tx_fifo(void)\n{\n uint8_t tx_fifo_base;\n uint8_t 
i;\n\n tx_fifo_base = rfm95w_io_read(RFM95W_REG_FIFO_TX_BASE_ADDR);\n rfm95w_io_write(RFM95W_REG_FIFO_ADDR_PTR, tx_fifo_base);\n\n for (i = 0; i != tx_buffer.length; ++i)\n {\n rfm95w_io_write(RFM95W_REG_FIFO, tx_buffer.data[i]);\n }\n\n rfm95w_io_write(RFM95W_REG_PAYLOAD_LENGTH, tx_buffer.length);\n}\n\nstatic void rfm95w_write_settings_from_flash(void)\n{\n rfm95w_modem_cfg_bw_t bandwidth;\n rfm95w_coding_rate_t coding_rate;\n rfm95w_spreading_factor_t spreading_factor;\n rfm95w_channel_frequency_t frequency;\n\n if (!flash_read_byte(FLASH_INDEX_LORA_PARAMS_INITIALIZED))\n {\n flash_init_write_buffer();\n\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_BANDWIDTH,\n RFM95W_BW_125K);\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_CODING_RATE,\n RFM95W_CODING_RATE_4_5);\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_SPREADING_FACTOR,\n RFM95W_SPREADING_FACTOR_128_CHIPS);\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_FREQUENCY,\n RFM95W_CHANNEL_FREQUENCY_868_1);\n flash_write_byte_to_buffer(FLASH_INDEX_LORA_PARAMS_INITIALIZED,\n 1);\n\n flash_write_buffer_to_flash();\n }\n\n bandwidth = flash_read_byte(FLASH_INDEX_LORA_BANDWIDTH);\n coding_rate = flash_read_byte(FLASH_INDEX_LORA_CODING_RATE);\n spreading_factor = flash_read_byte(FLASH_INDEX_LORA_SPREADING_FACTOR);\n frequency = flash_read_byte(FLASH_INDEX_LORA_FREQUENCY);\n\n rfm95w_io_set_bandwidth(bandwidth);\n rfm95w_io_set_coding_rate(coding_rate);\n rfm95w_io_set_speading_factor(spreading_factor);\n rfm95w_io_set_frequency(frequency);\n}\n\nstatic void rfm95w_setup_dio0_for_tx_done(void)\n{\n uint8_t reg_irq_masks;\n\n reg_irq_masks = rfm95w_io_read(RFM95W_REG_IRQ_FLAGS_MASK);\n reg_irq_masks &= ~RFM95W_IRQ_FLAG_TX_DONE_MASK;\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS_MASK, reg_irq_masks);\n\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS, RFM95W_IRQ_FLAG_TX_DONE_MASK); // clear irq\n\n rfm95w_io_set_dio_function(0, RFM95W_DIO0_FUNC_TX_DONE);\n gpio_register_cn_handler(GPIO_CN_PIN_LORA_DIO0, rfmw_dio0_callback);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO0, true);\n}\n\nstatic void rfm95w_setup_dio0_for_rx_done(void)\n{\n uint8_t reg_irq_masks;\n\n reg_irq_masks = rfm95w_io_read(RFM95W_REG_IRQ_FLAGS_MASK);\n reg_irq_masks &= ~RFM95W_IRQ_FLAG_RX_DONE_MASK;\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS_MASK, reg_irq_masks);\n\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS, RFM95W_IRQ_FLAG_RX_DONE_MASK); // clear irq\n\n rfm95w_io_set_dio_function(0, RFM95W_DIO0_FUNC_RX_DONE);\n gpio_register_cn_handler(GPIO_CN_PIN_LORA_DIO0, rfmw_dio0_callback);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO0, true);\n}\n\nstatic void rfm95w_setup_dio1_for_rx_timeout(void)\n{\n uint8_t reg_irq_masks;\n\n reg_irq_masks = rfm95w_io_read(RFM95W_REG_IRQ_FLAGS_MASK);\n reg_irq_masks &= ~RFM95W_IRQ_FLAG_RX_TIMEOUT_MASK;\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS_MASK, reg_irq_masks);\n\n rfm95w_io_write(RFM95W_REG_IRQ_FLAGS, RFM95W_IRQ_FLAG_RX_TIMEOUT_MASK); // clear irq\n\n rfm95w_io_set_dio_function(1, RFM95W_DIO1_FUNC_RX_TIMEOUT);\n gpio_register_cn_handler(GPIO_CN_PIN_LORA_DIO1, rfmw_dio1_callback);\n gpio_enable_cn(GPIO_CN_PIN_LORA_DIO1, true);\n}\n\nstatic void rfmw_dio0_callback(bool rising)\n{\n if (rising)\n {\n if (RFM95W_RADIO_STATE_TX == radio_state)\n {\n handle_tx_done();\n }\n else if (RFM95W_RADIO_STATE_RX_SINGLE == radio_state)\n {\n handle_rx_done();\n }\n else if (RFM95W_RADIO_STATE_RX_CONTINUOUS == radio_state)\n {\n handle_continuous_rx_packet();\n }\n else\n {\n ;\n }\n }\n else\n {\n ;\n }\n}\n\nstatic void rfmw_dio1_callback(bool rising)\n{\n if (rising)\n {\n if 
(RFM95W_RADIO_STATE_RX_SINGLE == radio_state)\n {\n handle_rx_timeout();\n }\n }\n else\n {\n ;\n }\n}" }, { "alpha_fraction": 0.49067631363868713, "alphanum_fraction": 0.5171166062355042, "avg_line_length": 25.134546279907227, "blob_id": "6c4d0b0c3683b373759c59fafad5678d4019a95c", "content_id": "2583a4fea17b711539cd51346f5ae392e2c2f961", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7186, "license_type": "no_license", "max_line_length": 88, "num_lines": 275, "path": "/life_jacket.X/src/audio/ext_flash.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"audio/ext_flash.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n#include <stdio.h>\n\n#include <xc.h>\n\n#include \"hal/gpio.h\"\n#include \"hal/spi_hal.h\"\n#include \"hal/uart.h\"\n#include \"uart/debug_log.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\nstatic const uint8_t RDID_MANUFACTURER_ID = 0xC2;\nstatic const uint16_t RDID_DEVICE_ID = 0x2817;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void ext_flash_read_id(uint8_t * manufacturer_id, uint16_t * device_id);\n\nstatic void ext_flash_write_enable(void);\n\nstatic uint8_t ext_flash_read_status_register(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid ext_flash_init(void)\n{\n uint8_t manufacturer_id;\n uint16_t device_id;\n\n ext_flash_read_id(&manufacturer_id, &device_id);\n\n if (RDID_MANUFACTURER_ID != manufacturer_id)\n {\n sprintf(g_uart_string_buffer,\n \"External flash - invalid manufacturer id. Was 0x%02X should be 0x%02X\",\n manufacturer_id,\n RDID_MANUFACTURER_ID);\n debug_log_append_line(g_uart_string_buffer);\n \n return;\n }\n\n if (RDID_DEVICE_ID != device_id)\n {\n sprintf(g_uart_string_buffer,\n \"External flash - invalid device id. 
Was 0x%04X should be 0x%04X\",\n device_id,\n RDID_DEVICE_ID);\n debug_log_append_line(g_uart_string_buffer);\n\n return;\n }\n\n sprintf(g_uart_string_buffer,\n \"External flash communication ok\");\n debug_log_append_line(g_uart_string_buffer);\n}\n\nvoid ext_flash_read(void * destination, uint32_t address, uint32_t length)\n{\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ);\n\n (void)spi_hal_tranceive8((uint8_t)(address >> 16));\n (void)spi_hal_tranceive8((uint8_t)(address >> 8));\n (void)spi_hal_tranceive8((uint8_t)(address >> 0));\n\n spi_hal_read16_block(destination, length);\n\n FLASH_CS_OFF;\n}\n\nuint8_t ext_flash_read_byte(uint32_t address)\n{\n uint8_t read_byte;\n\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ);\n\n (void)spi_hal_tranceive8((uint8_t)(address >> 16));\n (void)spi_hal_tranceive8((uint8_t)(address >> 8));\n (void)spi_hal_tranceive8((uint8_t)(address >> 0));\n\n read_byte = spi_hal_tranceive8(0);\n\n FLASH_CS_OFF;\n\n return read_byte;\n}\n\nuint16_t ext_flash_read_word(uint32_t address)\n{\n uint16_t read_word;\n\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ);\n\n (void)spi_hal_tranceive8((uint8_t)(address >> 16));\n (void)spi_hal_tranceive8((uint8_t)(address >> 8));\n (void)spi_hal_tranceive8((uint8_t)(address >> 0));\n\n read_word = ((uint16_t)spi_hal_tranceive8(0)) << 8;\n read_word |= ((uint16_t)spi_hal_tranceive8(0)) << 0;\n \n FLASH_CS_OFF;\n\n return read_word;\n}\n\nuint32_t ext_flash_read_dword(uint32_t address)\n{\n uint32_t read_dword;\n\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ);\n\n (void)spi_hal_tranceive8((uint8_t)(address >> 16));\n (void)spi_hal_tranceive8((uint8_t)(address >> 8));\n (void)spi_hal_tranceive8((uint8_t)(address >> 0));\n\n read_dword = ((uint32_t)spi_hal_tranceive8(0)) << 24;\n read_dword |= ((uint32_t)spi_hal_tranceive8(0)) << 16;\n read_dword |= ((uint32_t)spi_hal_tranceive8(0)) << 8;\n read_dword |= ((uint32_t)spi_hal_tranceive8(0)) << 0;\n \n FLASH_CS_OFF;\n\n return read_dword;\n}\n\nvoid ext_flash_chip_erase(void)\n{\n ext_flash_write_enable();\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_CHIP_ERASE);\n\n FLASH_CS_OFF;\n\n debug_log_append_line(\"Chip erase started!\");\n\n while (ext_flash_is_write_in_progress())\n {\n ClrWdt();\n }\n\n debug_log_append_line(\"Chip erase done!\");\n}\n\nbool ext_flash_is_write_in_progress(void)\n{\n return ((ext_flash_read_status_register() & 0x01) != 0);\n}\n\nvoid ext_flash_program_page(const void * data, uint32_t address)\n{\n uint16_t i;\n const uint16_t * data_pointer = data;\n\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n while (ext_flash_is_write_in_progress())\n {\n ;\n }\n\n ext_flash_write_enable();\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_PAGE_PROGRAM);\n\n (void)spi_hal_tranceive8((uint8_t)(address >> 16));\n (void)spi_hal_tranceive8((uint8_t)(address >> 8));\n (void)spi_hal_tranceive8((uint8_t)(address >> 0));\n\n for (i = 0; i != EXT_FLASH_PAGE_LENGTH / 2; ++i)\n {\n (void)spi_hal_tranceive8(data_pointer[i] >> 8);\n (void)spi_hal_tranceive8(data_pointer[i] >> 0);\n }\n\n FLASH_CS_OFF;\n\n while (ext_flash_is_write_in_progress())\n {\n ;\n }\n}\n\n// 
=============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void ext_flash_read_id(uint8_t * manufacturer_id, uint16_t * device_id)\n{\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ_ID);\n\n *manufacturer_id = spi_hal_tranceive8(0);\n *device_id = ((uint16_t)spi_hal_tranceive8(0)) << 8;\n *device_id |= spi_hal_tranceive8(0);\n\n FLASH_CS_OFF;\n}\n\nstatic void ext_flash_write_enable(void)\n{\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_WRITE_ENABLE);\n\n FLASH_CS_OFF;\n}\n\nstatic uint8_t ext_flash_read_status_register(void)\n{\n uint8_t status_register = 0x0000;\n\n spi_hal_setup_for_device(SPI_DEVICE_EXT_FLASH);\n\n FLASH_CS_ON;\n\n (void)spi_hal_tranceive8((uint8_t)EXT_FLASH_CMD_READ_STATUS);\n\n status_register = spi_hal_tranceive8(0);\n\n FLASH_CS_OFF;\n\n return status_register;\n}" }, { "alpha_fraction": 0.4653937816619873, "alphanum_fraction": 0.46857598423957825, "avg_line_length": 26.615385055541992, "blob_id": "411fa34d2d54a7f2fd30c99ae3b4ce1d882bc75a", "content_id": "61ec980351f01d0731b33d3ae29ac56b32d66449", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2514, "license_type": "no_license", "max_line_length": 80, "num_lines": 91, "path": "/life_jacket.X/inc/gps/nmea_queue.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef NMEA_QUEUE_H\n#define NMEA_QUEUE_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef struct nmea_queue_struct_t * nmea_queue_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n#define NMEA_MAX_MESSAGE_LENGTH (85)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Initializes the NMEA queue.\n*/\nvoid nmea_queue_init(nmea_queue_t queue);\n\n/**\n @brief Gets the NMEA RX queue.\n @return RX queue.\n*/\nnmea_queue_t nmea_queue_get_rx_queue(void);\n\n/**\n @brief Gets the NMEA TX queue.\n @return TX queue.\n*/\nnmea_queue_t nmea_queue_get_tx_queue(void);\n\n/**\n @brief Appends a NMEA message to the queue.\n @param queue - Queue to append to\n @param message - Message to append to the queue\n @param length - Number of characters in message\n*/\nvoid nmea_queue_append(nmea_queue_t queue, char * message, uint16_t length);\n\n/**\n @brief Gets the number of elements in the queue.\n @param queue - Queue to get the size of\n @return Number of elements in the RX 
queue.\n*/\nuint16_t nmea_queue_size(nmea_queue_t queue);\n\n/**\n @breif Gets the first message in the queue.\n @param queue - Queue to peek in\n @return First message in the queue.\n*/\nchar * nmea_queue_peek(nmea_queue_t queue);\n\n/**\n @brief Gets the length of the first message in the queue.\n @param queue - Queue to peek in\n @return Length of the first message in the RX queue.\n*/\nuint16_t nmea_queue_peek_length(nmea_queue_t queue);\n\n/**\n @brief Removes the first element from the queue.\n @param queue - Queue to pop\n*/\nvoid nmea_queue_pop(nmea_queue_t queue);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* NMEA_QUEUE_H */\n\n" }, { "alpha_fraction": 0.4733833968639374, "alphanum_fraction": 0.5045388340950012, "avg_line_length": 32.29850769042969, "blob_id": "2be721a35296af2e5cb3b0af91baec036a5b3078", "content_id": "99e81c54c755c0808da89e3ddd43bbca42fab8ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8923, "license_type": "no_license", "max_line_length": 88, "num_lines": 268, "path": "/life_jacket.X/src/lora/p2pc_protocol.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"lora\\p2pc_protocol.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"lora/rfm95w.h\"\n#include \"uart/debug_log.h\"\n#include \"hal/uart.h\"\n#include \"lora/lora_tx_queue.h\"\n#include \"gps/gps.h\"\n#include \"hal/flash.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\nstatic const uint32_t P2P_BROADCAST_ADDRESS = 0xFFFFFFFF;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic volatile uint8_t frame_number;\nstatic volatile uint8_t last_sent_frame_number;\nstatic uint32_t my_address;\nstatic bool initialized = false;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void p2pc_handle_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr,\n rfm95w_ack_parameters_t * ack_parameters,\n rfm95w_buffer_t * ack);\n\nstatic void p2pc_write_header(uint8_t * data,\n const p2p_frame_header_t * header);\n\nstatic uint32_t p2pc_parse_radio_code(const uint8_t * data);\n\nstatic void p2pc_print_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr);\n\nstatic bool p2pc_handle_ack(const uint8_t * data, uint8_t length);\n\n// =============================================================================\n// Public function definitions\n// 
=============================================================================\n\nvoid p2pc_protocol_init(void)\n{\n rfm95w_register_received_message_callback(p2pc_handle_received_message);\n\n frame_number = 0;\n my_address = flash_read_dword(FLASH_INDEX_LORA_ADDRESS_MSB);\n\n initialized = true;\n debug_log_append_line(\"LORA P2PC initialized\");\n}\n\nvoid p2pc_protocol_broadcast_gps_position(void)\n{\n uint8_t message[32];\n p2p_frame_header_t header;\n lora_tx_queue_element_t queue_element;\n\n const nmea_coordinates_info_t * coordinates;\n uint8_t * minutes_pointer;\n\n header.source_address = my_address;\n header.destination_address = P2P_BROADCAST_ADDRESS;\n header.frame_number = frame_number;\n header.time_to_live = 15;\n header.protocol = 1;\n header.data_type = P2P_DATA_TYPE_GPS_POSITION;\n\n last_sent_frame_number = header.frame_number;\n frame_number += 1;\n\n p2pc_write_header(message, &header);\n\n //\n // Fill in GPS data here\n //\n coordinates = gps_get_coordinates();\n\n if (NULL == coordinates)\n {\n message[P2P_INDEX_APPLICATION + 0] = 0;\n message[P2P_INDEX_APPLICATION + 1] = 0;\n message[P2P_INDEX_APPLICATION + 2] = 0;\n message[P2P_INDEX_APPLICATION + 3] = 0;\n message[P2P_INDEX_APPLICATION + 4] = 0;\n message[P2P_INDEX_APPLICATION + 5] = 0;\n message[P2P_INDEX_APPLICATION + 6] = 0;\n message[P2P_INDEX_APPLICATION + 7] = 0;\n message[P2P_INDEX_APPLICATION + 8] = 0;\n message[P2P_INDEX_APPLICATION + 9] = 0;\n message[P2P_INDEX_APPLICATION + 10] = 0;\n message[P2P_INDEX_APPLICATION + 11] = 0;\n message[P2P_INDEX_APPLICATION + 12] = 0;\n message[P2P_INDEX_APPLICATION + 13] = 0;\n message[P2P_INDEX_APPLICATION + 14] = 0;\n }\n else\n {\n message[P2P_INDEX_APPLICATION + 0] = (uint8_t)(coordinates->latitude_deg >> 8);\n message[P2P_INDEX_APPLICATION + 1] = (uint8_t)coordinates->latitude_deg;\n\n if (coordinates->latitude_north)\n {\n message[P2P_INDEX_APPLICATION + 0] |= 0x80;\n }\n\n message[P2P_INDEX_APPLICATION + 2] = (uint8_t)(coordinates->longitude_deg >> 8);\n message[P2P_INDEX_APPLICATION + 3] = (uint8_t)coordinates->longitude_deg;\n\n if (coordinates->longitude_east)\n {\n message[P2P_INDEX_APPLICATION + 2] |= 0x80;\n }\n\n minutes_pointer = (uint8_t*)&(coordinates->latitude_minutes);\n message[P2P_INDEX_APPLICATION + 4] = *(minutes_pointer + 0);\n message[P2P_INDEX_APPLICATION + 5] = *(minutes_pointer + 1);\n message[P2P_INDEX_APPLICATION + 6] = *(minutes_pointer + 2);\n message[P2P_INDEX_APPLICATION + 7] = *(minutes_pointer + 3);\n\n minutes_pointer = (uint8_t*)&(coordinates->longitude_minutes);\n message[P2P_INDEX_APPLICATION + 8] = *(minutes_pointer + 0);\n message[P2P_INDEX_APPLICATION + 9] = *(minutes_pointer + 1);\n message[P2P_INDEX_APPLICATION + 10] = *(minutes_pointer + 2);\n message[P2P_INDEX_APPLICATION + 11] = *(minutes_pointer + 3);\n\n message[P2P_INDEX_APPLICATION + 12] = coordinates->time_of_fix_hours;\n message[P2P_INDEX_APPLICATION + 13] = coordinates->time_of_fix_minutes;\n message[P2P_INDEX_APPLICATION + 14] = coordinates->time_of_fix_seconds;\n }\n \n queue_element.data = message;\n queue_element.length = P2P_INDEX_APPLICATION + 15;\n lora_tx_queue_append(&queue_element);\n}\n\nbool p2pc_protocol_is_active(void)\n{\n return initialized;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void p2pc_handle_received_message(\n const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n 
uint8_t snr,\n rfm95w_ack_parameters_t * ack_parameters,\n rfm95w_buffer_t * ack)\n{\n uint32_t src;\n uint32_t dst;\n\n ack_parameters->send_ack = false;\n ack_parameters->wait_for_ack = false;\n ack_parameters->was_valid_ack = false;\n\n p2pc_print_received_message(data, length, rssi, snr);\n\n src = p2pc_parse_radio_code(&data[0]);\n dst = p2pc_parse_radio_code(&data[4]);\n\n if ((P2P_BROADCAST_ADDRESS == dst) || (dst == my_address))\n {\n uint8_t data_type = data[P2P_INDEX_DATA_TYPE];\n\n switch (data_type)\n {\n case P2P_DATA_TYPE_ACK:\n ack_parameters->was_valid_ack =\n p2pc_handle_ack(data, length);\n break;\n\n default:\n break;\n }\n }\n}\n\nstatic void p2pc_write_header(uint8_t * data, const p2p_frame_header_t * header)\n{\n data[0] = (uint8_t)(header->source_address >> 24);\n data[1] = (uint8_t)(header->source_address >> 16);\n data[2] = (uint8_t)(header->source_address >> 8);\n data[3] = (uint8_t)(header->source_address >> 0);\n\n data[4] = (uint8_t)(header->destination_address >> 24);\n data[5] = (uint8_t)(header->destination_address >> 16);\n data[6] = (uint8_t)(header->destination_address >> 8);\n data[7] = (uint8_t)(header->destination_address >> 0);\n\n data[8] = header->time_to_live;\n data[9] = header->frame_number;\n data[10] = header->protocol;\n data[11] = header->data_type;\n}\n\nstatic uint32_t p2pc_parse_radio_code(const uint8_t * data)\n{\n uint32_t radio_code = 0x00000000;\n\n radio_code |= ((uint32_t)*data++) << 24;\n radio_code |= ((uint32_t)*data++) << 16;\n radio_code |= ((uint32_t)*data++) << 8;\n radio_code |= ((uint32_t)*data++) << 0;\n\n return radio_code;\n}\n\nstatic void p2pc_print_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr)\n{\n uint8_t i;\n char * p = g_uart_string_buffer;\n\n sprintf(g_uart_string_buffer, \"Received: \");\n p += strlen(g_uart_string_buffer);\n\n for (i = 0; i != length; ++i)\n {\n sprintf(p, \"%02X \", data[i]);\n p += 3;\n }\n\n sprintf(p, \"RSSI = %d SNR = %f dB\", rssi, ((double)(int8_t)snr) / 4);\n\n debug_log_append_line(g_uart_string_buffer);\n}\n\nstatic bool p2pc_handle_ack(const uint8_t * data, uint8_t length)\n{\n return data[P2P_INDEX_FRAME_NUMBER] == last_sent_frame_number;\n}" }, { "alpha_fraction": 0.3983488082885742, "alphanum_fraction": 0.39938080310821533, "avg_line_length": 27.072463989257812, "blob_id": "b29e537f7a719cfe80cffec2a3c77ce00a5e3cb1", "content_id": "2c0401a8d1838d115bfce510449d5d7bcc993b62", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1938, "license_type": "no_license", "max_line_length": 80, "num_lines": 69, "path": "/life_jacket.X/inc/lora/lora_tx_queue.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef LORA_TX_QUEUE_H\n#define LORA_TX_QUEUE_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef struct lora_tx_queue_element_t\n{\n const uint8_t * data;\n uint8_t length;\n} lora_tx_queue_element_t;\n\n// =============================================================================\n// Global variable declarations\n// 
=============================================================================\n \n// =============================================================================\n// Global constants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n * @brief Initializes the lora tx queue.\n */\nvoid lora_tx_queue_init(void);\n\n/**\n * @brief Checks if the queue is empty.\n * @return True if the queue is empty.\n */\nbool lora_tx_queue_is_empty(void);\n\n/**\n * @brief Appends a message to the tx queue.\n * @param element - Element to append to the queue.\n */\nvoid lora_tx_queue_append(const lora_tx_queue_element_t * element);\n\n/**\n * @brief Peeks at the first element in the queue.\n * @param element - Returns first element in queue.\n */\nvoid lora_tx_queue_peek(lora_tx_queue_element_t * element);\n\n/**\n * @brief Transmits the first element and removes it from the queue.\n */\nvoid lora_tx_queue_transmit_and_pop(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* LORA_TX_QUEUE_H */\n\n" }, { "alpha_fraction": 0.4575304388999939, "alphanum_fraction": 0.46462154388427734, "avg_line_length": 24.644268035888672, "blob_id": "3af502a85291780bebb69e72d748bae79ba93f24", "content_id": "5b695882f179876f27704c422ed86c0d236caec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6487, "license_type": "no_license", "max_line_length": 80, "num_lines": 253, "path": "/life_jacket.X/src/main/main.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"status.h\"\n#include \"build_number.h\"\n\n#include \"hal/gpio.h\"\n#include \"hal/flash.h\"\n#include \"hal/clock.h\"\n#include \"hal/uart.h\"\n\n#include \"acc/accelerometer.h\"\n\n#include \"lora/rfm95w.h\"\n#include \"lora/lora_tx_queue.h\"\n#include \"lora/p2pc_protocol.h\"\n#include \"lora/p2ps_protocol.h\"\n\n#include \"audio/ext_flash.h\"\n#include \"audio/pcm1770.h\"\n\n#include \"uart/terminal.h\"\n#include \"uart/debug_log.h\"\n\n#include \"gps/jf2_io.h\"\n#include \"gps/jf2_uart.h\"\n#include \"gps/nmea_queue.h\"\n#include \"gps/nmea.h\"\n#include \"gps/gps.h\"\n\n#include \"audio/audio.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n// =============================================================================\n// Private function 
declarations\n// =============================================================================\n\nstatic void init(void);\n\nstatic void print_start_message(uint16_t reset_reason);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nint main(int argc, char** argv)\n{\n nmea_queue_t nmea_rx_queue;\n\n init();\n\n nmea_rx_queue = nmea_queue_get_rx_queue();\n\n while (1)\n {\n ClrWdt();\n\n if (status_check(STATUS_UART_RECEIVE_FLAG))\n {\n terminal_handle_uart_event();\n status_clear(STATUS_UART_RECEIVE_FLAG);\n }\n\n if (status_check(STATUS_GPS_ON_EVENT))\n {\n status_clear(STATUS_GPS_ON_EVENT);\n debug_log_append_line(\"Sending GPS on pulse\");\n jf2_io_send_on_pulse();\n }\n\n if (nmea_queue_size(nmea_rx_queue))\n {\n nmea_handle_message(nmea_queue_peek(nmea_rx_queue));\n nmea_queue_pop(nmea_rx_queue);\n }\n\n if (!lora_tx_queue_is_empty() && rfm95w_is_idle())\n {\n lora_tx_queue_transmit_and_pop();\n }\n\n if (status_check(STATUS_GPS_BROADCAST_EVENT))\n {\n status_clear(STATUS_GPS_BROADCAST_EVENT);\n g_clock_gps_broadcast_timeout_sec = GPS_BROADCAST_INTERVAL_SEC;\n p2pc_protocol_broadcast_gps_position();\n debug_log_append_line(\"GPS broadcast added to tx queue.\");\n }\n\n gps_poll();\n\n if (status_check(STATUS_AUDIO_BUFFER_UPDATE_EVENT))\n {\n status_clear(STATUS_AUDIO_BUFFER_UPDATE_EVENT);\n audio_handle_buffer_update();\n }\n\n if (rfm95w_is_idle() &&\n gps_allows_sleep_mode() &&\n termnial_allows_sleep() &&\n uart_is_write_buffer_empty())\n {\n Sleep();\n }\n }\n\n return (EXIT_SUCCESS);\n}\n\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void init(void)\n{\n uint16_t reset_reason;\n\n CLKDIVbits.RCDIV = 0;\n reset_reason = RCON;\n\n gpio_init();\n clock_start_msec_timer();\n\n uart_init();\n print_start_message(reset_reason);\n\n flash_init();\n\n while (!uart_is_write_buffer_empty()){;}\n rfm95w_init();\n while (!uart_is_write_buffer_empty()){;}\n accelerometer_init();\n while (!uart_is_write_buffer_empty()){;}\n ext_flash_init();\n\n gps_init();\n\n lora_tx_queue_init();\n\n if (flash_read_byte(FLASH_INDEX_LORA_P2PS_NOT_P2PC))\n {\n p2ps_protocol_init();\n }\n else\n {\n p2pc_protocol_init();\n }\n\n clock_start_rtc();\n\n if (p2pc_protocol_is_active())\n {\n g_clock_gps_broadcast_timeout_sec = GPS_BROADCAST_INTERVAL_SEC;\n }\n\n if (p2ps_protocol_is_active())\n {\n rfm95w_start_continuous_rx();\n }\n}\n\nstatic void print_start_message(uint16_t reset_reason)\n{\n uart_write_string(\"\\r\\n\\r\\n\");\n uart_write_string(\"Life jacket tracker V1.0.0\\r\\n\");\n uart_write_string(\"Build number: \");\n uart_write_string(BUILD_NUMBER_STRING);\n uart_write_string(\"\\r\\nLast compiled \");\n uart_write_string(__DATE__);\n uart_write_string(\", \");\n uart_write_string(__TIME__);\n uart_write_string(\"\\r\\nReset reason was:\");\n\n if (reset_reason & _RCON_TRAPR_MASK)\n {\n RCONbits.TRAPR = 0;\n uart_write_string(\"\\r\\n\\tTrap reset\");\n }\n\n if (reset_reason & _RCON_IOPUWR_MASK)\n {\n RCONbits.IOPUWR = 0;\n uart_write_string(\"\\r\\n\\tIllegal op. 
code\");\n }\n\n if (reset_reason & _RCON_EXTR_MASK)\n {\n RCONbits.EXTR = 0;\n uart_write_string(\"\\r\\n\\tExternal reset\");\n }\n\n if (reset_reason & _RCON_SWR_MASK)\n {\n RCONbits.SWR = 0;\n uart_write_string(\"\\r\\n\\tSoftware reset\");\n }\n\n if (reset_reason & _RCON_WDTO_MASK)\n {\n uint8_t wdt_reset_count;\n\n wdt_reset_count = flash_read_byte(FLASH_INDEX_WDT_RESETS) + 1;\n flash_init_write_buffer();\n flash_write_byte_to_buffer(FLASH_INDEX_WDT_RESETS, wdt_reset_count);\n flash_write_buffer_to_flash();\n\n RCONbits.WDTO = 0;\n sprintf(g_uart_string_buffer,\n \"\\r\\n\\tWatchdog timeout reset #%u\",\n wdt_reset_count);\n uart_write_string(g_uart_string_buffer);\n\n\n }\n\n if (reset_reason & _RCON_BOR_MASK)\n {\n RCONbits.BOR = 0;\n uart_write_string(\"\\r\\n\\tBrown out reset\");\n }\n\n if (reset_reason & _RCON_POR_MASK)\n {\n RCONbits.POR = 0;\n uart_write_string(\"\\r\\n\\tPower on reset\");\n }\n\n uart_write_string(\"\\r\\nType 'help' for help.\\r\\n\");\n}" }, { "alpha_fraction": 0.4075733721256256, "alphanum_fraction": 0.4629173278808594, "avg_line_length": 28.56291389465332, "blob_id": "1af7dd16a286b7f5760525833c64a252ef9f0b29", "content_id": "f446999545ef536dec3d4064c128b16342b8753e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4463, "license_type": "no_license", "max_line_length": 90, "num_lines": 151, "path": "/life_jacket.X/src/audio/pcm1770.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"audio/pcm1770.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n\n#include <xc.h>\n\n#include \"hal/gpio.h\"\n#include \"hal/spi_hal.h\"\n#include \"hal/clock.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n#define PCM1770_HIGHEST_REGISTER_ADDR (4)\n\n#define I2S_BITS_PER_FRAME (32)\n#define I2S_TARGET_SAMPLING_FREQ (31250ull)\n#define I2S_TARGET_FREQ (I2S_TARGET_SAMPLING_FREQ * I2S_BITS_PER_FRAME)\nstatic uint16_t I2S_BRG = \n CLOCK_HAL_PCBCLOCK_FREQ / (2 * I2S_TARGET_FREQ) - 1;\n\n// =============================================================================\n// Private variables\n// =============================================================================\nstatic uint8_t shadow_registers[PCM1770_HIGHEST_REGISTER_ADDR + 1];\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void pcm1770_i2s_init(void);\nstatic void pcm1770_i2s_deinit(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid pcm1770_init(void)\n{\n PCM1770_N_PD_PIN = 1;\n\n shadow_registers[1] = 0x3F;\n 
shadow_registers[2] = 0x3F;\n    shadow_registers[3] = 0x80;\n    shadow_registers[4] = 0x00;\n\n    pcm1770_write_register(1, shadow_registers[1]);\n    pcm1770_write_register(2, shadow_registers[2]);\n    pcm1770_write_register(3, shadow_registers[3]);\n    pcm1770_write_register(4, shadow_registers[4]);\n\n    pcm1770_i2s_init();\n}\n\nvoid pcm1770_deinit(void)\n{\n    PCM1770_N_PD_PIN = 0;\n\n    pcm1770_i2s_deinit();\n}\n\nvoid pcm1770_write_register(uint8_t address, uint8_t value)\n{\n    // Register address in the high byte, register value in the low byte.\n    uint16_t value_to_write = ((uint16_t)address) << 8;\n    value_to_write |= value;\n\n    spi_hal_setup_for_device(SPI_DEVICE_PCM1770);\n    spi_hal_tranceive16(value_to_write);\n\n    shadow_registers[address] = value;\n}\n\nvoid pcm1770_power_up(void)\n{\n    pcm1770_write_register(4, shadow_registers[4] | 0x01);\n}\n\nvoid pcm1770_power_down(void)\n{\n    pcm1770_write_register(4, shadow_registers[4] & 0xFE);\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void pcm1770_i2s_init(void)\n{\n    //\n    // Set up the I2S module for 16 bit, right-justified data.\n    //\n    SPI2IMSKL = 0;              // No interrupts\n    SPI2IMSKH = 0;              // No interrupts\n\n    SPI2CON1Lbits.SPIEN = 0;    // Turn off and reset the module\n\n    SPI2CON1L = 0;\n    SPI2CON1H = 0;\n\n    SPI2STATL = 0;              // Clear any errors\n\n    SPI2IMSKLbits.SPITBEN = 1;  // SPIx transmit buffer empty generates an interrupt event\n    SPI2IMSKLbits.SPIRBFEN = 1;\n\n    SPI2CON1Hbits.AUDMOD = 0;   // I2S mode\n    SPI2CON1Hbits.AUDEN = 1;\n    SPI2CON1Hbits.AUDMONO = 1;  // Audio data is mono\n    SPI2CON1Hbits.IGNTUR = 1;   // Ignore tx underrun\n\n    SPI2BRGL = I2S_BRG;\n    SPI2STATLbits.SPIROV = 0;\n\n    SPI2CON1Lbits.MSTEN = 1;    // Master mode\n    SPI2CON1Lbits.CKP = 1;\n    SPI2CON1Lbits.MODE32 = 0;\n    SPI2CON1Lbits.MODE16 = 0;\n    SPI2CON1Lbits.ENHBUF = 0;   // Not enhanced buffer mode\n    SPI2CON1Lbits.DISSDI = 1;   // Do not use the MISO pin\n\n    SPI2CON1Lbits.SPIEN = 1;\n}\n\nstatic void pcm1770_i2s_deinit(void)\n{\n    //\n    // Shut down and reset the I2S module.\n    //\n    SPI2IMSKL = 0;              // No interrupts\n    SPI2IMSKH = 0;              // No interrupts\n\n    SPI2CON1Lbits.SPIEN = 0;    // Turn off and reset the module\n\n    SPI2CON1L = 0;\n    SPI2CON1H = 0;\n\n    SPI2STATL = 0;              // Clear any errors\n}" }, { "alpha_fraction": 0.42788901925086975, "alphanum_fraction": 0.47051259875297546, "avg_line_length": 31.783132553100586, "blob_id": "1daa99d0324f023139be85becdb090caa13dfda3", "content_id": "09f7322d985c4cb03f680ad4681a5e901bba223d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5443, "license_type": "no_license", "max_line_length": 108, "num_lines": 166, "path": "/life_jacket.X/src/audio/dma.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\n * This file forms a hardware abstraction layer for the DMA module\n * which is used in order to move data between peripherals and memory without\n * involving the CPU.\n *\n * References:\n *  - PIC24FJ128GA202 datasheet, document number DS30010038C\n *  - dsPIC33/PIC24 Family Reference Manual, Direct Memory Access controller,\n *    document number DS39742A\n */\n\n\n// =============================================================================\n// Include statements\n// =============================================================================\n#include <stdint.h>\n#include <stdbool.h>\n\n#include <xc.h>\n\n#include \"audio/dma.h\"\n#include \"audio/audio.h\"\n\n// 
=============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\nvolatile int16_t g_audio_tx_buff = 0;\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n//\n// Reference: DS30010038C-page 74\n// TABLE 5-1: DMA CHANNEL TRIGGER SOURCES\n//\ntypedef enum\n{\n DMA_TRIGGER_NULL = 0x00,\n DMA_TRIGGER_SPI3_GENERAL_EV = 0x01,\n DMA_TRIGGER_I2C1_SLAVE_EV = 0x02,\n DMA_TRIGGER_UART4_TRANSMIT_EV = 0x03,\n DMA_TRIGGER_UART4_RECEIVE_EV = 0x04,\n DMA_TRIGGER_UART4_ERROR = 0x05,\n DMA_TRIGGER_UART3_TRANSMIT_EV = 0x06,\n DMA_TRIGGER_UART3_RECEIVE_EV = 0x07,\n DMA_TRIGGER_UART3_ERROR = 0x08,\n DMA_TRIGGER_CTMU_EV = 0x09,\n DMA_TRIGGER_HLVD = 0x0A,\n DMA_TRIGGER_CRC_DONE = 0x0B,\n DMA_TRIGGER_UART2_ERROR = 0x0C,\n DMA_TRIGGER_UART1_ERROR = 0x0D,\n DMA_TRIGGER_RTCC = 0x0E,\n DMA_TRIGGER_DMA_CH5 = 0x0F,\n DMA_TRIGGER_EXT_INT_4 = 0x10,\n DMA_TRIGGER_EXT_INT_3 = 0x11,\n DMA_TRIGGER_SPI2_RECEIVE_EV = 0x12,\n DMA_TRIGGER_I2C2_MSTR_EV = 0x13,\n DMA_TRIGGER_DMA_CH4 = 0x14,\n DMA_TRIGGER_EPMP = 0x15,\n DMA_TRIGGER_SPI1_RECEIVE_EV = 0x16,\n DMA_TRIGGER_OUTPUT_COMPARE_6 = 0x17,\n DMA_TRIGGER_OUTPUT_COMPARE_5 = 0x18,\n DMA_TRIGGER_INPUT_CAPTURE_6 = 0x19,\n DMA_TRIGGER_INPUT_CAPTURE_5 = 0x1A,\n DMA_TRIGGER_INPUT_CAPTURE_4 = 0x1B,\n DMA_TRIGGER_INPUT_CAPTURE_3 = 0x1C,\n DMA_TRIGGER_DMA_CH3 = 0x1D,\n DMA_TRIGGER_SPI2_TRANSMIT_EV = 0x1E,\n DMA_TRIGGER_SPI2_GENERAL_EV = 0x1F,\n //\n // ... 
\n //\n DMA_TRIGGER_CRYPTOGRAPHIC_DONE = 0x3F\n} dma_channel_trigger_source_t;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n/**\n * @brief Initializes the DMA module.\n * @param void\n * @return void\n */\nvoid dma_init(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid dma_i2s_ch_init(void)\n{\n dma_init();\n\n DMACH0bits.CHEN = 0; // Disable channel 0\n\n DMASRC0 = (uint16_t)audio_get_front_buffer();\n DMADST0 = (uint16_t)&SPI2BUFL;\n\n DMACNT0 = AUDIO_BUFFER_LENGTH; // n transfers before an interrupt is \n // generated (mono not LR)\n DMACH0bits.SIZE = 0; // 16 bit transfer\n DMACH0bits.TRMODE = 0; // One-shot mode\n DMACH0bits.SAMODE = 1; // DMASRC is incremented based on SIZE bit after a transfer completion\n DMACH0bits.DAMODE = 0; // DMADST unchanged after a transfer completion\n DMACH0bits.RELOAD = 0; // DMASRC, DMADST and DMACNT are not reloaded on the start of the next operation\n \n DMACH0bits.CHEN = 1;\n\n DMAINT0bits.CHSEL = DMA_TRIGGER_SPI2_TRANSMIT_EV;\n\n IFS0bits.DMA0IF = 0;\n IPC1bits.DMA0IP = 6;\n IEC0bits.DMA0IE = 1;\n\n SPI2BUFL = 0; // Start the first transfer\n}\n\nvoid dma_i2s_ch_deinit(void)\n{\n IEC0bits.DMA0IE = 0;\n DMACH0bits.CHEN = 0;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nvoid dma_init(void)\n{\n int16_t * min_limit;\n int16_t * max_limit;\n\n audio_get_sample_pointer_limits(&min_limit, &max_limit);\n\n DMACONbits.DMAEN = 1; // Enable the DMA\n DMACONbits.PRSSEL = 0; // Fixed priority scheme\n\n DMAL = (uint16_t)min_limit - 4;\n DMAH = (uint16_t)max_limit + 4;\n}\n\nvoid __attribute((interrupt, no_auto_psv)) _DMA0Interrupt()\n{\n DMACH0bits.CHEN = 1;\n \n DMAINT0 &= 0xFF00; // Clear the interrupt flags\n\n audio_switch_buffer();\n\n DMASRC0 = (uint16_t)audio_get_front_buffer();\n DMADST0 = (uint16_t)&SPI2BUFL;\n DMACNT0 = AUDIO_BUFFER_LENGTH;\n\n IFS2bits.SPI2TXIF = 0; // Clear the trigger source\n IFS0bits.DMA0IF = 0;\n}\n\n" }, { "alpha_fraction": 0.35535168647766113, "alphanum_fraction": 0.3663608431816101, "avg_line_length": 27.66666603088379, "blob_id": "04727c04521f92aac2d02502811389b1ead62d3a", "content_id": "4af09076c7357d96e53f0ca43413895ab5b99114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 80, "num_lines": 57, "path": "/life_jacket.X/inc/hal/clock.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef CLOCK_HAL_H\n#define CLOCK_HAL_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// 
=============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n\nextern volatile uint16_t g_clock_gps_on_event_timeout;\nextern volatile uint16_t g_clock_gps_off_timeout;\nextern volatile uint16_t g_clock_gps_hot_start_timeout_sec;\nextern volatile uint16_t g_clock_gps_broadcast_timeout_sec;\n\n// =============================================================================\n// Global constants\n// =============================================================================\n\n#define CLOCK_HAL_PCBCLOCK_FREQ (16000000ull)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n  @brief Starts the msec timer.\n*/\nvoid clock_start_msec_timer(void);\n\n/**\n  @brief Gets the current millisecond timer value. \n*/\nuint32_t clock_get_msec(void);\n\n/**\n  @brief Starts the RTC timer.\n*/\nvoid clock_start_rtc(void);\n \n#ifdef __cplusplus\n}\n#endif\n\n#endif /* CLOCK_HAL_H */\n\n" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.44983920454978943, "avg_line_length": 21.94761848449707, "blob_id": "f547f693a16874b29f150792cca240cb1c3766a5", "content_id": "3395f1e74297fa03b2aed826ea999ecf5ae0e95d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9639, "license_type": "no_license", "max_line_length": 80, "num_lines": 420, "path": "/life_jacket.X/src/hal/uart.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\n * This file handles the UART module.\n *\n * References:\n *  - PIC24FJ64GA006 datasheet, document number DS39747D, page 139\n */\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <string.h>\n#include <stdint.h>\n#include <stdbool.h>\n\n#include \"hal/uart.h\"\n#include \"hal/gpio.h\"\n#include \"hal/clock.h\"\n#include \"status.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\nchar g_uart_string_buffer[UART_STRING_BUFFER_SIZE];\n\n// =============================================================================\n// Private constants\n// =============================================================================\n#define BUFFER_SIZE ((uint16_t)256)\n#define BACKSPACE_CHAR (0x08)\n\nstatic const uint32_t UART_BAUD = 38400;\nstatic const uint32_t PERIPHERAL_FREQ = CLOCK_HAL_PCBCLOCK_FREQ;\n\nstatic const uint8_t COMMAND_TERMINATION_CHAR = '\\n';\n\n// =============================================================================\n// Private variables\n// =============================================================================\nstatic bool uart_initialized = false;\n\nstatic volatile uint8_t rx_buff[BUFFER_SIZE];\nstatic volatile uint8_t tx_buff[BUFFER_SIZE];\n\nstatic volatile uint16_t rx_buff_first = 0;\nstatic volatile uint16_t 
rx_buff_last = 0;\nstatic volatile uint16_t tx_buff_first = 0;\nstatic volatile uint16_t tx_buff_last = 0;\n\nstatic volatile uint16_t rx_buff_size = 0;\nstatic volatile uint16_t tx_buff_size = 0;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n/**\n * @brief Starts transmission of the tx buffer.\n * @param void\n * @return void\n */\nstatic void start_tx(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid uart_init()\n{\n volatile int16_t wait_cnt = 0;\n\n if (false == uart_initialized)\n {\n //\n // Variables\n //\n rx_buff_first = 0;\n rx_buff_last = 0;\n tx_buff_first = 0;\n tx_buff_last = 0;\n\n rx_buff_size = 0;\n tx_buff_size = 0;\n\n status_set(STATUS_UART_RECEIVE_FLAG, 0);\n\n //\n // IO ports\n //\n DEBUG_UART_TX_DIR = DIR_OUT;\n DEBUG_UART_RX_DIR = DIR_IN;\n\n //\n // UART module\n //\n U2MODE = 0x0000;\n U2STA = 0x0000;\n\n U2BRG = (PERIPHERAL_FREQ / UART_BAUD) / 16 - 1;\n\n U2MODEbits.PDSEL = 0; // 8 bit data, no parity\n U2MODEbits.STSEL = 0; // 1 Stop bit\n\n // Interrupt is generated when any character is transfered to the\n // Transmit Shift Register and the hw transmit buffer is empty.\n U2STAbits.UTXISEL0 = 0;\n U2STAbits.UTXISEL1 = 0;\n IPC7bits.U2TXIP = 2; // Interrupt priority\n IEC1bits.U2TXIE = 1; // TX interrupt enable\n\n // Interrupt is generated each time a data word is transfered from\n // the U1RSR to the receive buffer. There may be one or more characters\n // in the receive buffer.\n U2STAbits.URXISEL = 0;\n IPC7bits.U2RXIP = 2; // Interrupt priority\n IEC1bits.U2RXIE = 1; // RX interrupt enable\n\n U2MODEbits.UARTEN = 1;\n U2STAbits.UTXEN = 1;\n\n for (wait_cnt = 0; wait_cnt != PERIPHERAL_FREQ / UART_BAUD; ++wait_cnt)\n {\n ;\n }\n\n uart_initialized = true;\n }\n}\n\nvoid uart_deinit()\n{\n if (true == uart_initialized)\n {\n //\n // Variables\n //\n rx_buff_first = 0;\n rx_buff_last = 0;\n tx_buff_first = 0;\n tx_buff_last = 0;\n\n rx_buff_size = 0;\n tx_buff_size = 0;\n\n status_set(STATUS_UART_RECEIVE_FLAG, 0);\n\n // Interrupt is generated when any character is transfered to the\n // Transmit Shift Register and the hw transmit buffer is empty.\n U2STAbits.UTXISEL0 = 0;\n U2STAbits.UTXISEL1 = 0;\n IPC7bits.U2TXIP = 2; // Interrupt priority\n IEC1bits.U2TXIE = 0; // TX interrupt enable\n\n // Interrupt is generated each time a data word is transfered from\n // the U1RSR to the receive buffer. 
There may be one or more characters\n // in the receive buffer.\n U2STAbits.URXISEL = 0;\n IPC7bits.U2RXIP = 2; // Interrupt priority\n IEC1bits.U2RXIE = 0; // RX interrupt enable\n\n U2MODEbits.UARTEN = 0;\n\n uart_initialized = false;\n }\n}\n\nvoid uart_write(uint8_t data)\n{\n if ((0 == tx_buff_size) && (0 == U2STAbits.UTXBF))\n {\n // hw transmit buffer not full but tx buffer is.\n U2TXREG = data;\n }\n else if (tx_buff_size < BUFFER_SIZE)\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = data;\n\n ++tx_buff_size;\n }\n}\n\nvoid uart_write_string(const char* data)\n{\n const uint8_t* p = (const uint8_t*)data;\n\n // Update the tx buffer.\n while (*p && (tx_buff_size < BUFFER_SIZE))\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = *(p++);\n\n ++tx_buff_size;\n }\n\n start_tx();\n}\n\nvoid uart_write_array(uint16_t nbr_of_bytes, const uint8_t* data)\n{\n uint16_t i;\n\n // Update the tx buffer.\n for (i = 0; i != nbr_of_bytes; ++i)\n {\n if (tx_buff_size < BUFFER_SIZE)\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = *(data++);\n\n if (tx_buff_size < BUFFER_SIZE)\n {\n ++tx_buff_size;\n }\n }\n }\n\n start_tx();\n}\n\nbool uart_is_write_buffer_empty(void)\n{\n return tx_buff_size == 0;\n}\n\nuint8_t uart_get(uint16_t index)\n{\n uint16_t i;\n uint8_t data;\n\n uart_disable_rx_interrupt();\n\n i = rx_buff_first + index;\n\n if (i >= BUFFER_SIZE)\n {\n i -= BUFFER_SIZE;\n }\n\n data = rx_buff[i];\n\n uart_enable_rx_interrupt();\n\n return data;\n}\n\nuint16_t uart_get_receive_buffer_size(void)\n{\n return rx_buff_size;\n}\n\nbool uart_is_receive_buffer_empty(void)\n{\n return (0 == rx_buff_size);\n}\n\nvoid uart_clear_receive_buffer(void)\n{\n uart_disable_rx_interrupt();\n\n rx_buff_size = 0;\n rx_buff_first = 0;\n rx_buff_last = 0;\n\n uart_enable_rx_interrupt();\n}\n\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nvoid __attribute__((interrupt, no_auto_psv)) _U2TXInterrupt(void)\n{\n while ((0 == U2STAbits.UTXBF) && (0 != tx_buff_size))\n {\n // TX fifo not full and there are more things to send\n U2TXREG = tx_buff[tx_buff_first];\n\n if (1 != tx_buff_size)\n {\n ++tx_buff_first;\n }\n\n if (tx_buff_first >= BUFFER_SIZE)\n {\n tx_buff_first = 0;\n }\n\n --tx_buff_size;\n }\n\n IFS1bits.U2TXIF = 0;\n}\n\nvoid __attribute__((interrupt, no_auto_psv)) _U2RXInterrupt(void)\n{\n uint8_t received;\n\n uart_disable_tx_interrupt();\n\n if (U2STAbits.OERR)\n {\n U2STAbits.OERR = 0;\n }\n\n while (U2STAbits.URXDA)\n {\n received = U2RXREG;\n\n if (COMMAND_TERMINATION_CHAR == received)\n {\n status_set(STATUS_UART_RECEIVE_FLAG, true);\n }\n\n if (BACKSPACE_CHAR != received)\n {\n if (0 != rx_buff_size)\n {\n ++rx_buff_last;\n\n if (rx_buff_last >= BUFFER_SIZE)\n {\n rx_buff_last = 0;\n }\n }\n\n rx_buff[rx_buff_last] = received;\n\n ++rx_buff_size;\n }\n else\n {\n if (1 < rx_buff_size)\n {\n\n if (0 != rx_buff_last)\n {\n --rx_buff_last;\n }\n else\n {\n rx_buff_last = BUFFER_SIZE - 1;\n }\n }\n\n if (0 != rx_buff_size)\n {\n --rx_buff_size;\n }\n }\n\n\n uart_write(received);\n }\n\n uart_enable_tx_interrupt();\n\n IFS1bits.U2RXIF = 0;\n}\n\nstatic void 
start_tx(void)\n{\n uart_disable_tx_interrupt();\n uart_disable_rx_interrupt();\n\n while ((0 != tx_buff_size) && (0 == U2STAbits.UTXBF))\n {\n U2TXREG = tx_buff[tx_buff_first];\n\n if (1 != tx_buff_size)\n {\n ++tx_buff_first;\n }\n\n if (tx_buff_first >= BUFFER_SIZE)\n {\n tx_buff_first = 0;\n }\n\n --tx_buff_size;\n }\n\n uart_enable_tx_interrupt();\n uart_enable_rx_interrupt();\n}\n\n" }, { "alpha_fraction": 0.48037049174308777, "alphanum_fraction": 0.5076307654380798, "avg_line_length": 32.4542236328125, "blob_id": "8b405d146386e0358ed57ed9db04fdf06f30d1ef", "content_id": "700305f99224d41076a61f17a5013422aaec03ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9501, "license_type": "no_license", "max_line_length": 95, "num_lines": 284, "path": "/life_jacket.X/src/lora/p2ps_protocol.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"lora\\p2ps_protocol.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"lora/rfm95w.h\"\n#include \"uart/debug_log.h\"\n#include \"hal/uart.h\"\n#include \"lora/lora_tx_queue.h\"\n#include \"gps/gps.h\"\n#include \"hal/flash.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\nstatic const uint32_t P2P_BROADCAST_ADDRESS = 0xFFFFFFFF;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic uint8_t frame_number;\nstatic uint32_t my_address;\nstatic bool initialized = false;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void p2ps_handle_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr,\n rfm95w_ack_parameters_t * ack_parameters,\n rfm95w_buffer_t * ack);\n\nstatic void p2ps_parse_header(p2p_frame_header_t * header,\n const uint8_t * data);\n\nstatic void p2ps_print_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr);\n\nstatic void p2ps_parse_gps_coordinates(nmea_coordinates_info_t * coordinates,\n const uint8_t * data);\n\nstatic void p2ps_print_coordinates(\n const nmea_coordinates_info_t * coordinates);\n\nstatic void p2ps_handle_gps_position(const uint8_t * data,\n uint8_t length);\n\nstatic void p2ps_prepare_ack(rfm95w_buffer_t * ack, p2p_frame_header_t header);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid p2ps_protocol_init(void)\n{\n 
rfm95w_register_received_message_callback(p2ps_handle_received_message);\n\n frame_number = 0;\n my_address = flash_read_dword(FLASH_INDEX_LORA_ADDRESS_MSB);\n\n initialized = true;\n debug_log_append_line(\"LORA P2PS initialized\");\n}\n\nbool p2ps_protocol_is_active(void)\n{\n return initialized;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void p2ps_handle_received_message(\n const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr,\n rfm95w_ack_parameters_t * ack_parameters,\n rfm95w_buffer_t * ack)\n{\n p2p_frame_header_t header;\n\n ack_parameters->send_ack = false;\n ack_parameters->wait_for_ack = false;\n ack_parameters->was_valid_ack = false;\n\n p2ps_print_received_message(data, length, rssi, snr);\n\n p2ps_parse_header(&header, data);\n\n if ((P2P_BROADCAST_ADDRESS == header.destination_address) ||\n (my_address == header.destination_address))\n {\n uint8_t data_type = data[P2P_INDEX_DATA_TYPE];\n\n switch (data_type)\n {\n case P2P_DATA_TYPE_GPS_POSITION:\n p2ps_handle_gps_position(data, length);\n p2ps_prepare_ack(ack, header);\n ack_parameters->send_ack = true;\n break;\n\n default:\n break;\n }\n }\n}\n\nstatic void p2ps_parse_header(p2p_frame_header_t * header,\n const uint8_t * data)\n{\n header->source_address = 0;\n header->source_address |= ((uint32_t)data[P2P_INDEX_SOURCE + 0]) << 24;\n header->source_address |= ((uint32_t)data[P2P_INDEX_SOURCE + 1]) << 16;\n header->source_address |= ((uint32_t)data[P2P_INDEX_SOURCE + 2]) << 8;\n header->source_address |= ((uint32_t)data[P2P_INDEX_SOURCE + 3]) << 0;\n\n header->destination_address = 0;\n header->destination_address |= ((uint32_t)data[P2P_INDEX_DESTINATION + 0]) << 24;\n header->destination_address |= ((uint32_t)data[P2P_INDEX_DESTINATION + 1]) << 16;\n header->destination_address |= ((uint32_t)data[P2P_INDEX_DESTINATION + 2]) << 8;\n header->destination_address |= ((uint32_t)data[P2P_INDEX_DESTINATION + 3]) << 0;\n\n header->time_to_live = data[P2P_INDEX_TIME_TO_LIVE];\n header->frame_number = data[P2P_INDEX_FRAME_NUMBER];\n header->protocol = data[P2P_INDEX_PROTOCOL];\n header->data_type = data[P2P_INDEX_DATA_TYPE];\n}\n\nstatic void p2ps_print_received_message(const uint8_t * data,\n uint8_t length,\n int16_t rssi,\n uint8_t snr)\n{\n uint8_t i;\n char * p = g_uart_string_buffer;\n\n sprintf(g_uart_string_buffer, \"Received: \");\n p += strlen(g_uart_string_buffer);\n\n for (i = 0; i != length; ++i)\n {\n sprintf(p, \"%02X \", data[i]);\n p += 3;\n }\n\n sprintf(p, \"RSSI = %d SNR = %f dB\", rssi, ((double)(int8_t)snr) / 4);\n\n debug_log_append_line(g_uart_string_buffer);\n}\n\nstatic void p2ps_parse_gps_coordinates(nmea_coordinates_info_t * coordinates,\n const uint8_t * data)\n{\n uint8_t * minutes_pointer;\n\n coordinates->latitude_north = (0 != (data[P2P_GPS_INDEX_LATITUDE_DEG + 0] & 0x80));\n\n coordinates->latitude_deg = (uint16_t)(data[P2P_GPS_INDEX_LATITUDE_DEG + 0] & 0x7F) << 8;\n coordinates->latitude_deg |= (uint16_t)(data[P2P_GPS_INDEX_LATITUDE_DEG + 1]);\n\n coordinates->longitude_east = (0 != (data[P2P_GPS_INDEX_LONGITUDE_DEG + 0] & 0x80));\n\n coordinates->longitude_deg = (uint16_t)(data[P2P_GPS_INDEX_LONGITUDE_DEG + 0] & 0x7F) << 8;\n coordinates->longitude_deg |= (uint16_t)(data[P2P_GPS_INDEX_LONGITUDE_DEG + 1]);\n\n minutes_pointer = (uint8_t*)&(coordinates->latitude_minutes);\n\n *(minutes_pointer + 0) = 
data[P2P_GPS_INDEX_LATITUDE_MINUTES + 0];\n *(minutes_pointer + 1) = data[P2P_GPS_INDEX_LATITUDE_MINUTES + 1];\n *(minutes_pointer + 2) = data[P2P_GPS_INDEX_LATITUDE_MINUTES + 2];\n *(minutes_pointer + 3) = data[P2P_GPS_INDEX_LATITUDE_MINUTES + 3];\n\n minutes_pointer = (uint8_t*)&(coordinates->longitude_minutes);\n\n *(minutes_pointer + 0) = data[P2P_GPS_INDEX_LONGITUDE_MINUTES + 0];\n *(minutes_pointer + 1) = data[P2P_GPS_INDEX_LONGITUDE_MINUTES + 1];\n *(minutes_pointer + 2) = data[P2P_GPS_INDEX_LONGITUDE_MINUTES + 2];\n *(minutes_pointer + 3) = data[P2P_GPS_INDEX_LONGITUDE_MINUTES + 3];\n\n coordinates->time_of_fix_hours = data[P2P_GPS_INDEX_TOF_HOURS];\n coordinates->time_of_fix_minutes = data[P2P_GPS_INDEX_TOF_MINUTES];\n coordinates->time_of_fix_seconds = data[P2P_GPS_INDEX_TOD_SECONDS];\n}\n\nstatic void p2ps_print_coordinates(\n const nmea_coordinates_info_t * coordinates)\n{\n char ns = 'N';\n char ew = 'E';\n\n if (coordinates->latitude_north)\n {\n ns = 'N';\n }\n else\n {\n ns = 'S';\n }\n\n if (coordinates->longitude_east)\n {\n ew = 'E';\n }\n else\n {\n ew = 'W';\n }\n\n\n sprintf(g_uart_string_buffer,\n \"GPS COORDINTATE: %u%c %.4f' %c, %u%c %.4f' %c %02u:%02u:%02u\\r\\n\",\n coordinates->latitude_deg,\n 176,\n (double)coordinates->latitude_minutes,\n ns,\n coordinates->longitude_deg,\n 176,\n (double)coordinates->longitude_minutes,\n ew,\n coordinates->time_of_fix_hours,\n coordinates->time_of_fix_minutes,\n coordinates->time_of_fix_seconds\n );\n\n uart_write_string(g_uart_string_buffer);\n}\n\nstatic void p2ps_handle_gps_position(const uint8_t * data,\n uint8_t length)\n{\n nmea_coordinates_info_t coordinates;\n\n p2ps_parse_gps_coordinates(&coordinates,\n &data[P2P_INDEX_APPLICATION]);\n\n p2ps_print_coordinates(&coordinates);\n}\n\nstatic void p2ps_prepare_ack(rfm95w_buffer_t * ack, p2p_frame_header_t header)\n{\n ack->data[0] = (uint8_t)(my_address >> 24);\n ack->data[1] = (uint8_t)(my_address >> 16);\n ack->data[2] = (uint8_t)(my_address >> 8);\n ack->data[3] = (uint8_t)(my_address >> 0);\n\n ack->data[4] = (uint8_t)(header.source_address >> 24);\n ack->data[5] = (uint8_t)(header.source_address >> 16);\n ack->data[6] = (uint8_t)(header.source_address >> 8);\n ack->data[7] = (uint8_t)(header.source_address >> 0);\n\n ack->data[8] = header.time_to_live;\n ack->data[9] = header.frame_number;\n ack->data[10] = header.protocol;\n ack->data[11] = P2P_DATA_TYPE_ACK;\n\n ack->length = 12;\n}\n" }, { "alpha_fraction": 0.45837897062301636, "alphanum_fraction": 0.4886089861392975, "avg_line_length": 26.33832359313965, "blob_id": "c581bdd13abbe4fb481e2d805f22633b621a0263", "content_id": "c2f37127e3369c435e1ff291d32f98d0f4ee47dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9130, "license_type": "no_license", "max_line_length": 80, "num_lines": 334, "path": "/life_jacket.X/src/audio/audio.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n\n#include <xc.h>\n\n#include \"hal/gpio.h\"\n#include \"audio/audio.h\"\n#include \"audio/dma.h\"\n#include \"audio/pcm1770.h\"\n#include \"audio/ext_flash.h\"\n#include \"status.h\"\n\n#include \"hal/uart.h\"\n#include \"uart/debug_log.h\"\n\n// 
=============================================================================\n// Private type definitions\n// =============================================================================\n\ntypedef struct audio_flash_header_t\n{\n uint16_t number_of_tracks;\n} audio_flash_header_t;\n\ntypedef struct audio_track_header_t\n{\n uint32_t start_address;\n uint32_t number_of_samples;\n} audio_track_header_t;\n\n#define AUDIO_TRACK_HEADER_SIZE (8)\n\ntypedef struct audio_track_t\n{\n uint32_t number_of_samples;\n uint32_t samples_played;\n uint32_t start_address;\n uint32_t next_address;\n} audio_track_t;\n\ntypedef enum\n{\n AUDIO_FLASH_ADDRESS_NUMBER_OF_TRACKS = 0,\n AUDIO_FLASH_ADDRESS_TRACK_HEADERS_START = 2\n} audio_flash_address_t;\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic int16_t audio_buffer[AUDIO_BUFFER_LENGTH + AUDIO_BUFFER_LENGTH];\nstatic bool is_using_buffer_a;\nstatic int16_t * buffer_a;\nstatic int16_t * buffer_b;\nstatic int16_t * buffer_a_end;\nstatic int16_t * buffer_b_end;\n\nstatic audio_track_t track;\nstatic bool playback_session_active = false;;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic int16_t * audio_get_back_buffer(void);\nstatic void audio_load_test_data(void);\n\nstatic uint16_t audio_get_number_of_tracks(void);\nstatic void get_track_header(audio_track_header_t * header,\n uint16_t track_number);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid audio_init(void)\n{\n buffer_a = &audio_buffer[0];\n buffer_b = &audio_buffer[AUDIO_BUFFER_LENGTH];\n\n buffer_a_end = &audio_buffer[AUDIO_BUFFER_LENGTH];\n buffer_b_end = &audio_buffer[AUDIO_BUFFER_LENGTH + AUDIO_BUFFER_LENGTH];\n\n playback_session_active = false;\n}\n\nvoid audio_start_playback_session(uint16_t track_number)\n{\n audio_track_header_t track_header;\n\n audio_init();\n\n if (track_number >= audio_get_number_of_tracks())\n {\n debug_log_append_line(\"Tried to play non-existent track.\");\n return;\n }\n\n get_track_header(&track_header, track_number);\n \n track.start_address = track_header.start_address;\n track.number_of_samples = track_header.number_of_samples;\n\n track.samples_played = 0;\n track.next_address = track.start_address;\n\n ext_flash_read((uint16_t*)audio_get_front_buffer(),\n track.next_address,\n AUDIO_BUFFER_SIZE);\n\n track.next_address += AUDIO_BUFFER_SIZE;\n track.samples_played += AUDIO_BUFFER_LENGTH;\n\n ext_flash_read(audio_get_back_buffer(),\n track.next_address,\n AUDIO_BUFFER_SIZE);\n\n track.next_address += AUDIO_BUFFER_SIZE;\n track.samples_played += AUDIO_BUFFER_LENGTH;\n\n pcm1770_init();\n dma_i2s_ch_init();\n\n AMP_N_SD_PIN = 1;\n AMP_N_SD_BOOST_PIN = 1;\n\n playback_session_active = true;\n\n sprintf(g_uart_string_buffer,\n \"Playback 
of track %u started\",\n track_number);\n debug_log_append_line(g_uart_string_buffer);\n}\n\nvoid audio_handle_buffer_update(void)\n{\n if (track.samples_played < track.number_of_samples)\n {\n ext_flash_read(audio_get_back_buffer(),\n track.next_address,\n AUDIO_BUFFER_SIZE);\n\n track.next_address += AUDIO_BUFFER_SIZE;\n track.samples_played += AUDIO_BUFFER_LENGTH;\n }\n else\n {\n pcm1770_deinit();\n dma_i2s_ch_deinit();\n\n AMP_N_SD_PIN = 0;\n AMP_N_SD_BOOST_PIN = 0;\n\n playback_session_active = false;\n\n debug_log_append_line(\"Audio playback finished.\");\n }\n}\n\nbool audio_is_playback_in_progress(void)\n{\n return playback_session_active;\n}\n\nconst int16_t * audio_get_front_buffer(void)\n{\n const int16_t * current_buffer = audio_buffer;\n\n if (!is_using_buffer_a)\n {\n current_buffer += AUDIO_BUFFER_LENGTH;\n }\n\n return current_buffer;\n}\n\nvoid audio_switch_buffer(void)\n{\n is_using_buffer_a = !is_using_buffer_a;\n\n status_set(STATUS_AUDIO_BUFFER_UPDATE_EVENT, true);\n}\n\nvoid audio_get_sample_pointer_limits(int16_t ** min, int16_t ** max)\n{\n *min = buffer_a;\n *max = buffer_b_end;\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic int16_t * audio_get_back_buffer(void)\n{\n int16_t * current_buffer = audio_buffer;\n\n if (is_using_buffer_a)\n {\n current_buffer += AUDIO_BUFFER_LENGTH;\n }\n\n return current_buffer;\n}\n\nstatic void audio_load_test_data(void)\n{\n uint16_t i;\n uint16_t k;\n uint16_t index;\n\n for (i = 0; i != 8; ++i)\n {\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + k;\n buffer_a[index] = k * 4;\n buffer_b[index] = k * 4;\n }\n\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + 32 + k;\n buffer_a[index] = (32 - k) * 4;\n buffer_b[index] = (32 - k) * 4;\n }\n }\n \n ext_flash_chip_erase();\n\n ext_flash_program_page(&buffer_a[128 * 0], 256 * 0);\n ext_flash_program_page(&buffer_a[128 * 1], 256 * 1);\n ext_flash_program_page(&buffer_a[128 * 2], 256 * 2);\n ext_flash_program_page(&buffer_a[128 * 3], 256 * 3);\n\n ext_flash_program_page(&buffer_b[128 * 0], 256 * 4);\n ext_flash_program_page(&buffer_b[128 * 1], 256 * 5);\n ext_flash_program_page(&buffer_b[128 * 2], 256 * 6);\n ext_flash_program_page(&buffer_b[128 * 3], 256 * 7);\n\n for (i = 0; i != 8; ++i)\n {\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + k;\n buffer_a[index] = 0;\n buffer_b[index] = 0;\n }\n\n for (k = 0; k != 32; ++k)\n {\n index = i * 64 + 32 + k;\n buffer_a[index] = 0;\n buffer_b[index] = 0;\n }\n }\n\n ext_flash_read(buffer_a, 0, 1024);\n ext_flash_read(buffer_b, 1024, 1024);\n\n sprintf(g_uart_string_buffer,\n \"Buffer A:\\r\\n\");\n uart_write_string(g_uart_string_buffer);\n\n for (k = 0; k != 64; ++k)\n {\n sprintf(g_uart_string_buffer,\n \"%04X %04X %04X %04X %04X %04X %04X %04X\\r\\n\",\n buffer_a[8 * k + 0],\n buffer_a[8 * k + 1],\n buffer_a[8 * k + 2],\n buffer_a[8 * k + 3],\n buffer_a[8 * k + 4],\n buffer_a[8 * k + 5],\n buffer_a[8 * k + 6],\n buffer_a[8 * k + 7]);\n uart_write_string(g_uart_string_buffer);\n\n while (!uart_is_write_buffer_empty()){;}\n }\n\n sprintf(g_uart_string_buffer,\n \"\\r\\n\\r\\nBuffer B:\\r\\n\");\n uart_write_string(g_uart_string_buffer);\n\n for (k = 0; k != 64; ++k)\n {\n sprintf(g_uart_string_buffer,\n \"%04X %04X %04X %04X %04X %04X %04X %04X\\r\\n\",\n buffer_b[8 * k + 0],\n buffer_b[8 * k + 1],\n buffer_b[8 * k + 2],\n buffer_b[8 * k + 3],\n buffer_b[8 * k + 4],\n 
buffer_b[8 * k + 5],\n buffer_b[8 * k + 6],\n buffer_b[8 * k + 7]);\n uart_write_string(g_uart_string_buffer);\n\n while (!uart_is_write_buffer_empty()){;}\n }\n\n track.number_of_samples = 1024;\n track.samples_played = 0;\n track.start_address = 0;\n track.next_address = 0;\n}\n\nstatic uint16_t audio_get_number_of_tracks(void)\n{\n return ext_flash_read_word(AUDIO_FLASH_ADDRESS_NUMBER_OF_TRACKS);\n}\n\nstatic void get_track_header(audio_track_header_t * header,\n uint16_t track_number)\n{\n uint32_t header_address = AUDIO_FLASH_ADDRESS_TRACK_HEADERS_START +\n (AUDIO_TRACK_HEADER_SIZE * track_number);\n\n header->start_address = ext_flash_read_dword(header_address);\n header->number_of_samples = ext_flash_read_dword(header_address + 4);\n}" }, { "alpha_fraction": 0.49426889419555664, "alphanum_fraction": 0.5313336849212646, "avg_line_length": 32.21708297729492, "blob_id": "91579928c08aa768792ec1e1d85999419ccf996f", "content_id": "1971e3ca859edbaf2ae145ae4fde5277d84f583c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9335, "license_type": "no_license", "max_line_length": 80, "num_lines": 281, "path": "/life_jacket.X/inc/hal/gpio.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef GPIO_H\n#define\tGPIO_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n \n#define DIR_IN 1\n#define DIR_OUT 0\n\n//\n// LoRa\n//\n#define LORA_DIO0_PIN PORTAbits.RA0\n#define LORA_DIO0_DIR TRISAbits.TRISA0\n#define LORA_DIO0_CNEN CNEN1bits.CN2IE\n\n#define LORA_DIO1_PIN PORTAbits.RA1\n#define LORA_DIO1_DIR TRISAbits.TRISA1\n#define LORA_DIO1_CNEN CNEN1bits.CN3IE\n\n#define LORA_DIO2_PIN PORTAbits.RA2\n#define LORA_DIO2_DIR TRISAbits.TRISA2\n#define LORA_DIO2_CNEN CNEN2bits.CN30IE\n\n#define LORA_DIO3_PIN PORTCbits.RC9\n#define LORA_DIO3_DIR TRISCbits.TRISC9\n#define LORA_DIO3_CNEN CNEN2bits.CN19IE\n\n// LORA_DIO4 is input only\n#define LORA_DIO4_PIN PORTAbits.RA4\n#define LORA_DIO4_CNEN CNEN1bits.CN0IE\n\n#define LORA_DIO5_PIN PORTAbits.RA7\n#define LORA_DIO5_DIR TRISAbits.TRISA7\n#define LORA_DIO5_CNEN CNEN3bits.CN33IE\n\n#define LORA_N_CS_PIN LATCbits.LATC4\n#define LORA_N_CS_DIR TRISCbits.TRISC4\n#define LORA_CS_ON do {LORA_N_CS_PIN = 0;} while (0)\n#define LORA_CS_OFF do {LORA_N_CS_PIN = 1;} while (0)\n\n#define LORA_N_RESET_PIN LATCbits.LATC8\n#define LORA_N_RESET_DIR TRISCbits.TRISC8\n#define LORA_RESET_ON do {LORA_N_RESET_PIN = 0;} while (0)\n#define LORA_RESET_OFF do {LORA_N_RESET_PIN = 1;} while (0)\n\n#define LORA_ANT_SEL1_PIN LATBbits.LATB9\n#define LORA_ANT_SEL1_DIR TRISBbits.TRISB9\n\n#define LORA_ANT_SEL2_PIN LATBbits.LATB10\n#define LORA_ANT_SEL2_DIR TRISBbits.TRISB10\n\n#define LORA_SELECT_ANT_1 do {LORA_ANT_SEL1_PIN = 0; \\\n LORA_ANT_SEL2_PIN = 1;} while (0)\n\n#define LORA_SELECT_ANT_2 do {LORA_ANT_SEL2_PIN = 0; \\\n LORA_ANT_SEL1_PIN = 1;} while (0)\n\n//\n// GPS\n//\n#define GPS_TIMEPULSE_PIN PORTAbits.RA8\n#define GPS_TIMEPULSE_DIR TRISAbits.TRISA8\n\n#define GPS_ON_OFF_PIN LATAbits.LATA10\n#define GPS_ON_OFF_DIR TRISAbits.TRISA10\n\n#define GPS_TXD_PIN 
LATBbits.LATB15\n#define GPS_TXD_DIR TRISBbits.TRISB15\n#define GPS_TXD_PPS_REG RPOR7bits.RP15R\n\n#define GPS_RXD_PIN PORTBbits.RB14\n#define GPS_RXD_DIR TRISBbits.TRISB14\n#define GPS_RXD_PPS_REG RPINR18bits.U1RXR\n#define GPS_RXD_RP_PIN (14)\n\n#define GPS_ANT_SEL1_PIN PORTAbits.RA9\n#define GPS_ANT_SEL1_DIR TRISAbits.TRISA9\n\n#define GPS_ANT_SEL2_PIN PORTCbits.RC7\n#define GPS_ANT_SEL2_DIR TRISCbits.TRISC7\n\n#define GPS_SELECT_ANT_1 do {GPS_ANT_SEL1_PIN = 0; \\\n GPS_ANT_SEL2_PIN = 1;} while (0)\n\n#define GPS_SELECT_ANT_2 do {GPS_ANT_SEL2_PIN = 0; \\\n GPS_ANT_SEL1_PIN = 1;} while (0)\n\n//\n// Audio\n//\n#define PCM1770_SCKI_PIN LATAbits.LATA3\n#define PCM1770_SCKI_DIR TRISAbits.TRISA3\n\n#define PCM1770_N_CS_PIN LATCbits.LATC5\n#define PCM1770_N_CS_DIR TRISCbits.TRISC5\n#define PCM1770_CS_ON do {PCM1770_N_CS_PIN = 0;} while (0)\n#define PCM1770_CS_OFF do {PCM1770_N_CS_PIN = 1;} while (0)\n\n#define PCM1770_N_PD_PIN LATBbits.LATB8\n#define PCM1770_N_PD_DIR TRISBbits.TRISB8\n\n#define PCM1770_BCK_PIN LATBbits.LATB11\n#define PCM1770_BCK_DIR TRISBbits.TRISB11\n#define PCM1770_BCK_PPS_REG RPOR5bits.RP11R\n\n#define PCM1770_DATA_PIN LATBbits.LATB12\n#define PCM1770_DATA_DIR TRISBbits.TRISB12\n#define PCM1770_DATA_PPS_REG RPOR6bits.RP12R\n\n#define PCM1770_LRCK_PIN LATB13bits.LATB13\n#define PCM1770_LRCK_DIR TRISBbits.TRISB13\n#define PCM1770_LRCK_PPS_REG RPOR6bits.RP13R\n \n#define AMP_N_SD_PIN LATBbits.LATB7\n#define AMP_N_SD_DIR TRISBbits.TRISB7\n\n#define AMP_N_SD_BOOST_PIN LATBbits.LATB6\n#define AMP_N_SD_BOOST_DIR TRISBbits.TRISB6\n\n//\n// Accelerometer\n//\n#define ACC_N_CS_PIN LATCbits.LATC3\n#define ACC_N_CS_DIR TRISCbits.TRISC3\n#define ACC_CS_ON do {ACC_N_CS_PIN = 0;} while (0)\n#define ACC_CS_OFF do {ACC_N_CS_PIN = 1;} while (0)\n\n//\n// Flash\n//\n#define FLASH_N_CS_PIN\t\t\t\tLATCbits.LATC6\n#define FLASH_N_CS_DIR TRISCbits.TRISC6\n#define FLASH_CS_ON do {FLASH_N_CS_PIN = 0;} while (0)\n#define FLASH_CS_OFF do {FLASH_N_CS_PIN = 1;} while (0)\n\n//\n// Common SPI bus\n//\n#define SPI_CLK_PIN LATCbits.LATC0\n#define SPI_CLK_DIR TRISCbits.TRISC0\n#define SPI_CLK_PPS_REG RPOR8bits.RP16R\n\n#define SPI_MOSI_PIN LATCbits.LATC1\n#define SPI_MOSI_DIR TRISCbits.TRISC1\n#define SPI_MOSI_PPS_REG RPOR8bits.RP17R\n#define SPI_MISO_RP_PIN (18)\n\n#define SPI_MISO_PIN PORTCbits.PORTC2\n#define SPI_MISO_DIR TRISCbits.TRISC2\n#define SPI_MISO_PPS_REG RPINR20bits.SDI1R\n\n\n//\n// Debug\n//\n#define DEBUG_UART_RX_PIN PORTBbits.PORTB2\n#define DEBUG_UART_RX_DIR TRISBbits.TRISB2\n#define DEBUG_UART_RX_PPS_REG RPINR19bits.U2RXR\n#define DEBUG_UART_RX_RP_PIN (2)\n\n#define DEBUG_UART_TX_PIN LATBbits.LATB3\n#define DEBUG_UART_TX_DIR TRISBbits.TRISB3\n#define DEBUG_UART_TX_PPS_REG RPOR1bits.RP3R\n\n\n#define NC1_PIN LATBbits.LATB4\n#define NC1_DIR TRISBbits.TRISB4\n#define NC1_PULL_DOWN CNPD1bits.CN0PDE\n\n#define NC2_PIN LATBbits.LATB5\n#define NC2_DIR TRISBbits.TRISB5\n\n#define PGD1_PULL_DOWN CNPD1bits.CN4PDE\n#define PGC1_PULL_DOWN CNPD1bits.CN5PDE\n\ntypedef enum\n{\n GPIO_PPS_OUT_NULL = 0,\n GPIO_PPS_OUT_C1OUT = 1,\n GPIO_PPS_OUT_C2OUT = 2,\n GPIO_PPS_OUT_U1TX = 3,\n GPIO_PPS_OUT_U1RTS = 4,\n GPIO_PPS_OUT_U2TX = 5,\n GPIO_PPS_OUT_U2RTS = 6,\n GPIO_PPS_OUT_SDO1 = 7,\n GPIO_PPS_OUT_SCK1OUT = 8,\n GPIO_PPS_OUT_SS1OUT = 9,\n GPIO_PPS_OUT_SDO2 = 10,\n GPIO_PPS_OUT_SCK2OUT = 11,\n GPIO_PPS_OUT_SS2OUT = 12,\n GPIO_PPS_OUT_OC1 = 18,\n GPIO_PPS_OUT_OC2 = 19,\n GPIO_PPS_OUT_OC3 = 20,\n GPIO_PPS_OUT_OC4 = 21,\n GPIO_PPS_OUT_OC5 = 22\n} gpio_pps_output_function_t;\n\ntypedef enum\n{\n 
GPIO_CN_PIN_LORA_DIO0,\n GPIO_CN_PIN_LORA_DIO1,\n GPIO_CN_PIN_LORA_DIO2,\n GPIO_CN_PIN_LORA_DIO3,\n GPIO_CN_PIN_LORA_DIO4,\n GPIO_CN_PIN_LORA_DIO5,\n} gpio_cn_pin_t;\n\ntypedef void (*gpio_cn_callback_t)(bool rising);\n\ntypedef struct gpio_cn_pin_info_t\n{\n bool state;\n bool cn_enabled;\n gpio_cn_callback_t callback;\n} gpio_cn_pin_info_t;\n\ntypedef struct gpio_cn_pin_status_t\n{\n gpio_cn_pin_info_t lora_dio0;\n gpio_cn_pin_info_t lora_dio1;\n gpio_cn_pin_info_t lora_dio2;\n gpio_cn_pin_info_t lora_dio3;\n gpio_cn_pin_info_t lora_dio4;\n gpio_cn_pin_info_t lora_dio5;\n} gpio_cn_pin_status_t;\n\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n\n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n * @brief Sets up the GPIOs to their default state.\n */\nvoid gpio_init(void);\n\n/**\n * @brief Registers a callback function to be called when the specified pin\n * triggers a change notification.\n * @details The callback function will run in isr context.\n * @param pin - Pin to register the callback for.\n * @param callback - Function to call at a change notification event.\n */\nvoid gpio_register_cn_handler(gpio_cn_pin_t pin, gpio_cn_callback_t callback);\n\n/**\n * @brief Enables/disables a change notification callback to be generated.\n * @param pin - Pin to enable/disable change notification for.\n * @param enable - True if change notification should be enabed, false if\n * it should be disabled.\n */\nvoid gpio_enable_cn(gpio_cn_pin_t pin, bool enable);\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* GPIO_H */\n\n" }, { "alpha_fraction": 0.5289651155471802, "alphanum_fraction": 0.5354330539703369, "avg_line_length": 23.860139846801758, "blob_id": "c887d18bbd61b6d999fd86948a9aa83c9b86abef", "content_id": "6dd428b4ce4183e08e4b4389a72f75eea35229e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3556, "license_type": "no_license", "max_line_length": 80, "num_lines": 143, "path": "/life_jacket.X/inc/hal/uart.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef UART_H\n#define\tUART_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n#define UART_STRING_BUFFER_SIZE (80)\nextern char g_uart_string_buffer[UART_STRING_BUFFER_SIZE];\n\n// =============================================================================\n// Global constatants\n// 
=============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n/**\n * @brief Initializes the UART module\n * @param void\n * @return void\n */\nvoid uart_init();\n\n/**\n * @Brief Shuts down the UART module.\n */\nvoid uart_deinit();\n\n/**\n * @brief Writes a byte over the uart interface.\n * @param data - The data to send.\n * @return void\n */\nvoid uart_write(uint8_t data);\n\n/**\n * @brief Write a string over the uart interface.\n * @param data - The null terminated data to send.\n * @return void\n */\nvoid uart_write_string(const char* data);\n\n/**\n * @brief Write a string over the uart interface.\n * @brief nbr_of_bytes - The number of bytes to send.\n * @param data - The null terminated data to send.\n * @return void\n */\nvoid uart_write_array(uint16_t nbr_of_bytes, const uint8_t* data);\n\n/**\n * @brief Checks if the write buffer is empty.\n * @return True if the write buffer is empty.\n */\nbool uart_is_write_buffer_empty(void);\n\n/**\n * @brief Gets a byte from the receive buffer.\n * @param index - The index of the byte to get.\n * @return The byte in the receive buffer at the specified index.\n */\nuint8_t uart_get(uint16_t index);\n\n/**\n * @brief Gets the size (in number of elements) of the receive buffer.\n * @param void\n * @return The size of the reveive buffer.\n */\nuint16_t uart_get_receive_buffer_size(void);\n\n/**\n * @brief Checks if the receive buffer is empty.\n * @param void\n * @return True if the receive buffer is empty, false otherwise.\n */\nbool uart_is_receive_buffer_empty(void);\n\n/**\n * @brief Clears the receive buffer.\n * @param void\n * @return void\n */\nvoid uart_clear_receive_buffer(void);\n\n/**\n * @brief Enables the UART receive interrupt.\n * @details This interrupt will affect the transmit and receive buffer.\n */\nstatic inline void uart_enable_rx_interrupt()\n{\n IEC1bits.U2RXIE = 1;\n}\n\n/**\n * @brief Disables the UART receive interrupt.\n * @details This interrupt will affect the transmit and receive buffer.\n */\nstatic inline void uart_disable_rx_interrupt()\n{\n IEC1bits.U2RXIE = 0;\n}\n\n/**\n * @brief Enables the UART transmit interrupt.\n * @details This interrupt will affect the transmit buffer.\n */\nstatic inline void uart_enable_tx_interrupt()\n{\n IEC1bits.U2TXIE = 1;\n}\n\n/**\n * @brief Disables the UART transmit interrupt.\n * @details This interrupt will affect the transmit buffer.\n */\nstatic inline void uart_disable_tx_interrupt()\n{\n IEC1bits.U2TXIE = 0;\n}\n\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* UART_H */\n\n" }, { "alpha_fraction": 0.4376254677772522, "alphanum_fraction": 0.4579868018627167, "avg_line_length": 27.695472717285156, "blob_id": "0046ed87f365b2924ec4a409325d8fee84465022", "content_id": "6c0ed0fb92b66d303b6297d4081192f033fba157", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6974, "license_type": "no_license", "max_line_length": 80, "num_lines": 243, "path": "/life_jacket.X/src/hal/gpio.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n#include 
<string.h>\n\n#include \"hal/gpio.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic volatile gpio_cn_pin_status_t cn_pins;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nvoid gpio_handle_cn_pin_state_update(gpio_cn_pin_info_t * pin, bool new_state);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid gpio_init(void)\n{\n LORA_DIO0_DIR = DIR_IN;\n LORA_DIO1_DIR = DIR_IN;\n LORA_DIO2_DIR = DIR_IN;\n LORA_DIO3_DIR = DIR_IN;\n // DIO4 is input only\n LORA_DIO5_DIR = DIR_IN;\n\n LORA_N_CS_DIR = DIR_OUT;\n LORA_CS_OFF;\n\n LORA_N_RESET_DIR = DIR_OUT;\n LORA_RESET_ON;\n\n LORA_ANT_SEL1_DIR = DIR_OUT;\n LORA_ANT_SEL2_DIR = DIR_OUT;\n LORA_SELECT_ANT_1;\n\n GPS_TIMEPULSE_DIR = DIR_IN;\n GPS_ON_OFF_PIN = 0;\n GPS_ON_OFF_DIR = DIR_OUT;\n GPS_ON_OFF_PIN = 0;\n GPS_TXD_DIR = DIR_OUT;\n GPS_RXD_DIR = DIR_IN;\n GPS_ANT_SEL1_DIR = DIR_OUT;\n GPS_ANT_SEL2_DIR = DIR_OUT;\n GPS_SELECT_ANT_2;\n\n PCM1770_SCKI_DIR = DIR_OUT;\n PCM1770_N_CS_DIR = DIR_OUT;\n PCM1770_CS_OFF;\n PCM1770_N_PD_DIR = DIR_OUT;\n PCM1770_N_PD_PIN = 0;\n PCM1770_BCK_DIR = DIR_OUT;\n PCM1770_DATA_DIR = DIR_OUT;\n PCM1770_LRCK_DIR = DIR_OUT;\n\n AMP_N_SD_DIR = DIR_OUT;\n AMP_N_SD_PIN = 0;\n AMP_N_SD_BOOST_DIR = DIR_OUT;\n AMP_N_SD_BOOST_PIN = 0;\n\n ACC_N_CS_DIR = DIR_OUT;\n ACC_CS_OFF;\n\n FLASH_N_CS_DIR = DIR_OUT;\n FLASH_CS_OFF;\n\n SPI_CLK_DIR = DIR_OUT;\n SPI_MOSI_DIR = DIR_OUT;\n SPI_MISO_DIR = DIR_IN;\n\n DEBUG_UART_RX_DIR = DIR_IN;\n DEBUG_UART_TX_DIR = DIR_OUT;\n\n // No analog io\n ANSA = 0;\n ANSB = 0;\n ANSC = 0;\n\n // SPI interface\n SPI_CLK_PPS_REG = GPIO_PPS_OUT_SCK1OUT;\n SPI_MOSI_PPS_REG = GPIO_PPS_OUT_SDO1;\n SPI_MISO_PPS_REG = SPI_MISO_RP_PIN;\n\n // I2S interface\n PCM1770_BCK_PPS_REG = GPIO_PPS_OUT_SCK2OUT;\n PCM1770_DATA_PPS_REG = GPIO_PPS_OUT_SDO2;\n PCM1770_LRCK_PPS_REG = GPIO_PPS_OUT_SS2OUT;\n\n // Debug UART\n DEBUG_UART_TX_PPS_REG = GPIO_PPS_OUT_U2TX;\n DEBUG_UART_RX_PPS_REG = DEBUG_UART_RX_RP_PIN;\n\n // GPS UART\n GPS_TXD_PPS_REG = GPIO_PPS_OUT_U1TX;\n GPS_RXD_PPS_REG = GPS_RXD_RP_PIN;\n\n //\n // Unused pins\n //\n NC1_PULL_DOWN = 1;\n\n NC2_PIN = 0;\n NC2_DIR = DIR_OUT;\n NC2_PIN = 0;\n\n PGD1_PULL_DOWN = 1;\n PGC1_PULL_DOWN = 1;\n\n memset((void*)&cn_pins, sizeof(cn_pins), 0);\n\n // Enable change notification interrupts\n IEC1bits.CNIE = 1;\n}\n\nvoid gpio_register_cn_handler(gpio_cn_pin_t pin, gpio_cn_callback_t callback)\n{\n switch (pin)\n {\n case GPIO_CN_PIN_LORA_DIO0:\n cn_pins.lora_dio0.callback = callback;\n break;\n\n case GPIO_CN_PIN_LORA_DIO1:\n cn_pins.lora_dio1.callback = 
callback;\n break;\n\n case GPIO_CN_PIN_LORA_DIO2:\n cn_pins.lora_dio2.callback = callback;\n break;\n\n case GPIO_CN_PIN_LORA_DIO3:\n cn_pins.lora_dio3.callback = callback;\n break;\n\n case GPIO_CN_PIN_LORA_DIO4:\n cn_pins.lora_dio4.callback = callback;\n break;\n\n case GPIO_CN_PIN_LORA_DIO5:\n cn_pins.lora_dio5.callback = callback;\n break; \n }\n}\n\nvoid gpio_enable_cn(gpio_cn_pin_t pin, bool enable)\n{\n switch (pin)\n {\n case GPIO_CN_PIN_LORA_DIO0:\n cn_pins.lora_dio0.cn_enabled = enable;\n LORA_DIO0_CNEN = enable;\n cn_pins.lora_dio0.state = LORA_DIO0_PIN;\n break;\n\n case GPIO_CN_PIN_LORA_DIO1:\n cn_pins.lora_dio1.cn_enabled = enable;\n LORA_DIO1_CNEN = enable;\n cn_pins.lora_dio1.state = LORA_DIO1_PIN;\n break;\n\n case GPIO_CN_PIN_LORA_DIO2:\n cn_pins.lora_dio2.cn_enabled = enable;\n LORA_DIO2_CNEN = enable;\n cn_pins.lora_dio2.state = LORA_DIO2_PIN;\n break;\n\n case GPIO_CN_PIN_LORA_DIO3:\n cn_pins.lora_dio3.cn_enabled = enable;\n LORA_DIO3_CNEN = enable;\n cn_pins.lora_dio3.state = LORA_DIO3_PIN;\n break;\n\n case GPIO_CN_PIN_LORA_DIO4:\n cn_pins.lora_dio4.cn_enabled = enable;\n LORA_DIO4_CNEN = enable;\n cn_pins.lora_dio4.state = LORA_DIO4_PIN;\n break;\n\n case GPIO_CN_PIN_LORA_DIO5:\n cn_pins.lora_dio5.cn_enabled = enable;\n LORA_DIO5_CNEN = enable;\n cn_pins.lora_dio5.state = LORA_DIO5_PIN;\n break;\n }\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nvoid gpio_handle_cn_pin_state_update(gpio_cn_pin_info_t * pin, bool new_state)\n{\n if (pin->cn_enabled &&\n (new_state != pin->state))\n {\n if (NULL != pin->callback)\n {\n pin->callback(new_state); \n }\n }\n\n pin->state = new_state;\n}\n\nvoid __attribute__((interrupt, no_auto_psv)) _CNInterrupt(void)\n{\n IFS1bits.CNIF = 0;\n\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio0,\n LORA_DIO0_PIN);\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio1,\n LORA_DIO1_PIN);\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio2,\n LORA_DIO2_PIN);\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio3,\n LORA_DIO3_PIN);\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio4,\n LORA_DIO4_PIN);\n gpio_handle_cn_pin_state_update((gpio_cn_pin_info_t*)&cn_pins.lora_dio5,\n LORA_DIO5_PIN);\n}\n\n" }, { "alpha_fraction": 0.4239426255226135, "alphanum_fraction": 0.43161019682884216, "avg_line_length": 25.592105865478516, "blob_id": "60cd23ba8853934c625c2cadaa19728d92fe7fad", "content_id": "32eaf1f9b07fd938c487fe54e748e62137987b0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4043, "license_type": "no_license", "max_line_length": 80, "num_lines": 152, "path": "/life_jacket.X/src/lora/lora_tx_queue.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"lora\\lora_tx_queue.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <stdio.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"lora/rfm95w.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n#define 
LORA_TX_QUEUE_MAX_MESSAGE_LENGTH (128)\n\ntypedef struct lora_tx_element_t\n{\n uint8_t data[LORA_TX_QUEUE_MAX_MESSAGE_LENGTH];\n uint8_t length;\n} lora_tx_element_t;\n\n#define LORA_TX_QUEUE_LENGTH (4)\n\ntypedef struct lora_tx_queue_t\n{\n lora_tx_element_t elements[LORA_TX_QUEUE_LENGTH];\n uint8_t first; // new elements are added to index \"first\"\n uint8_t last; // index of oldest element\n uint8_t size; // number of elements in the queue\n} lora_tx_queue_t;\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic lora_tx_queue_t lora_tx_queue;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic void lora_tx_queue_increment_first(void);\nstatic void lora_tx_queue_increment_last(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid lora_tx_queue_init(void)\n{\n memset(&lora_tx_queue, sizeof(lora_tx_queue_t), 0);\n}\n\nbool lora_tx_queue_is_empty(void)\n{\n return (0 == lora_tx_queue.size);\n}\n\nvoid lora_tx_queue_append(const lora_tx_queue_element_t * element)\n{\n lora_tx_element_t * first_element;\n\n if (LORA_TX_QUEUE_LENGTH == lora_tx_queue.size)\n {\n return;\n }\n\n if (lora_tx_queue.size)\n {\n lora_tx_queue_increment_first();\n }\n\n first_element = &lora_tx_queue.elements[lora_tx_queue.first];\n\n memcpy(first_element->data,\n element->data,\n element->length);\n first_element->length = element->length;\n\n lora_tx_queue.size += 1;\n}\n\nvoid lora_tx_queue_peek(lora_tx_queue_element_t * element)\n{\n element->data = &(lora_tx_queue.elements[lora_tx_queue.last].data[0]);\n element->length = lora_tx_queue.elements[lora_tx_queue.last].length;\n}\n\nvoid lora_tx_queue_transmit_and_pop(void)\n{\n lora_tx_queue_element_t element;\n\n if (0 == lora_tx_queue.size)\n {\n return;\n }\n\n lora_tx_queue_peek(&element);\n\n rfm95w_clear_tx_fifo();\n rfm95w_write_tx_fifo(element.data,\n element.length,\n 0);\n\n rfm95w_start_tx(RFM95W_MAX_RETRANSMISSION_COUNT, true);\n\n if (1 != lora_tx_queue.size)\n {\n lora_tx_queue_increment_last();\n }\n\n lora_tx_queue.size -= 1;\n}\n\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic void lora_tx_queue_increment_first(void)\n{\n lora_tx_queue.first += 1;\n\n if (LORA_TX_QUEUE_LENGTH == lora_tx_queue.first)\n {\n lora_tx_queue.first = 0;\n }\n}\n\nstatic void lora_tx_queue_increment_last(void)\n{\n lora_tx_queue.last += 1;\n\n if (LORA_TX_QUEUE_LENGTH == lora_tx_queue.last)\n {\n lora_tx_queue.last = 0;\n }\n}\n\n" }, { "alpha_fraction": 0.4859529137611389, "alphanum_fraction": 0.48614275455474854, "avg_line_length": 29.627906799316406, "blob_id": 
"122c408eda1f79dbd85bd33bfa21715f0da5b28d", "content_id": "e54720a0f7f1bd00ba025c37aadc555763dc5d12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5270, "license_type": "no_license", "max_line_length": 105, "num_lines": 172, "path": "/FlashLoader/Main.cs", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.ComponentModel;\nusing System.Data;\nusing System.Drawing;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.Windows.Forms;\nusing System.IO;\n\nnamespace FlashLoader\n{\n public partial class Main : Form\n {\n enum UnitInterfaceWorkerState { OFF, READING, WRITING };\n\n private UnitInterfaceWorkerState interfaceWorkerState = UnitInterfaceWorkerState.OFF;\n private Unit Unit = null;\n\n\n\n public Main()\n {\n InitializeComponent();\n }\n\n private void closeButton_Click(object sender, EventArgs e)\n {\n if (null != Unit)\n {\n Unit.Close();\n Unit = null;\n StatusTextBox.Text += \"Connection closed\\r\\n\";\n }\n\n writeButton.Enabled = false;\n openButton.Enabled = true;\n }\n\n private void writeButton_Click(object sender, EventArgs e)\n {\n if (File.Exists(WriteFileTextBox.Text))\n {\n StatusTextBox.Text += \"Starting write operation\\r\\n\";\n openButton.Enabled = false;\n closeButton.Enabled = false;\n unitInterfaceWorker.RunWorkerAsync(UnitInterfaceWorkerState.WRITING);\n }\n else\n {\n StatusTextBox.Text += \"Invalid file name\\r\\n\";\n }\n }\n\n private void openButton_Click(object sender, EventArgs e)\n {\n bool alreadyOpen = false;\n\n if (null != Unit)\n {\n if (Unit.isOpen())\n {\n alreadyOpen = true;\n writeButton.Enabled = true;\n openButton.Enabled = true;\n }\n }\n\n if (!alreadyOpen)\n {\n Unit = new Unit(ComPortTextBox.Text, unitInterfaceWorker);\n\n if (Unit.isOpen())\n {\n StatusTextBox.Text += \"Connection opened\\r\\n\";\n writeButton.Enabled = true;\n openButton.Enabled = true;\n closeButton.Enabled = true;\n }\n else\n {\n StatusTextBox.Text += \"Connection failed\\r\\n\";\n }\n }\n }\n\n private void readButton_Click(object sender, EventArgs e)\n {\n if (File.Exists(ReadFileTextBox.Text))\n {\n StatusTextBox.Text += \"Starting read operation\\r\\n\";\n openButton.Enabled = false;\n closeButton.Enabled = false;\n unitInterfaceWorker.RunWorkerAsync(UnitInterfaceWorkerState.READING);\n }\n else\n {\n StatusTextBox.Text += \"Invalid file name\\r\\n\";\n }\n }\n\n private void unitInterfaceWorker_DoWork(object sender, DoWorkEventArgs e)\n {\n interfaceWorkerState = (UnitInterfaceWorkerState)e.Argument;\n\n switch (interfaceWorkerState)\n {\n case UnitInterfaceWorkerState.OFF:\n break;\n\n case UnitInterfaceWorkerState.READING:\n Unit.ReadConfig(ReadFileTextBox.Text);\n break;\n\n case UnitInterfaceWorkerState.WRITING:\n Unit.WriteConfig(WriteFileTextBox.Text);\n break;\n\n default:\n interfaceWorkerState = UnitInterfaceWorkerState.OFF;\n break;\n }\n }\n\n private void unitInterfaceWorker_RunWorkerCompleted(object sender, RunWorkerCompletedEventArgs e)\n {\n switch (interfaceWorkerState)\n {\n case UnitInterfaceWorkerState.READING:\n openButton.Enabled = true;\n closeButton.Enabled = true;\n if (Unit.isOpen())\n {\n StatusTextBox.Text += \"Configuration read\\r\\n\";\n }\n else\n {\n StatusTextBox.Text += \"Operation failed, port closed\\r\\n\";\n writeButton.Enabled = false;\n openButton.Enabled = false;\n }\n break;\n\n case UnitInterfaceWorkerState.WRITING:\n openButton.Enabled = true;\n 
closeButton.Enabled = true;\n if (Unit.isOpen())\n {\n StatusTextBox.Text += \"Config written\\r\\n\";\n }\n else\n {\n StatusTextBox.Text += \"Operation failed, port closed\\r\\n\";\n writeButton.Enabled = false;\n openButton.Enabled = false;\n }\n break;\n }\n }\n\n private void unitInterfaceWorker_ProgressChanged(object sender, ProgressChangedEventArgs e)\n {\n progressBar.Value = e.ProgressPercentage;\n }\n\n private void label3_Click(object sender, EventArgs e)\n {\n\n }\n }\n}\n" }, { "alpha_fraction": 0.34638166427612305, "alphanum_fraction": 0.41127994656562805, "avg_line_length": 33.362831115722656, "blob_id": "2ebd60fcd3b3a035951cb48bc804ccd1f0710f95", "content_id": "91cba2ee6c808928d51d8c488a0ca3befa9c5b6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3883, "license_type": "no_license", "max_line_length": 80, "num_lines": 113, "path": "/life_jacket.X/src/acc/accelerometer.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"acc/accelerometer.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n\n#include <xc.h>\n\n#include \"hal/gpio.h\"\n#include \"acc/lis2hh12_io.h\"\n#include \"uart/debug_log.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n\n// =============================================================================\n// Private constants\n// =============================================================================\nstatic const uint8_t WHO_AM_I_REG_VALUE = 0x41;\n\nstatic const uint8_t LIS2HH12_REG_INIT_TABLE[][2] =\n{\n {LIS2HH12_REG_ACT_THS, 0x00},\n {LIS2HH12_REG_ACT_DUR, 0x00},\n {LIS2HH12_REG_CTRL1, 0x17},\n {LIS2HH12_REG_CTRL2, 0x00},\n {LIS2HH12_REG_CTRL3, 0x00},\n {LIS2HH12_REG_CTRL4, 0x04},\n {LIS2HH12_REG_CTRL5, 0x00},\n {LIS2HH12_REG_CTRL6, 0x00},\n {LIS2HH12_REG_CTRL7, 0x00},\n {LIS2HH12_REG_FIFO_CTRL, 0x00},\n {LIS2HH12_REG_IG_CFG1, 0x00},\n {LIS2HH12_REG_IG_THS_X1, 0x00},\n {LIS2HH12_REG_IG_THS_Y1, 0x00},\n {LIS2HH12_REG_IG_THS_Z1, 0x00},\n {LIS2HH12_REG_IG_DUR1, 0x00},\n {LIS2HH12_REG_CFG2, 0x00},\n {LIS2HH12_REG_THS2, 0x00},\n {LIS2HH12_REG_DUR2, 0x00},\n {LIS2HH12_REG_XL_REFERENCE, 0x00},\n {LIS2HH12_REG_XH_REFERENCE, 0x00},\n {LIS2HH12_REG_YL_REFERENCE, 0x00},\n {LIS2HH12_REG_YH_REFERENCE, 0x00},\n {LIS2HH12_REG_ZL_REFERENCE, 0x00},\n {LIS2HH12_REG_ZH_REFERENCE, 0x00},\n};\n\n#define INIT_TABLE_ENTRY_COUNT (sizeof(LIS2HH12_REG_INIT_TABLE) / 2)\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n\n// =============================================================================\n// Public function definitions\n// 
=============================================================================\n\nvoid accelerometer_init(void)\n{\n uint8_t i;\n\n if (WHO_AM_I_REG_VALUE == lis2hh12_read_register(LIS2HH12_REG_WHO_AM_I))\n {\n debug_log_append_line(\"LIS2HH12 communication ok\");\n }\n else\n {\n debug_log_append_line(\"LIS2HH12 communication failiure\");\n return;\n }\n\n for (i = 0; i != INIT_TABLE_ENTRY_COUNT; ++i)\n {\n lis2hh12_write_register(LIS2HH12_REG_INIT_TABLE[i][0],\n LIS2HH12_REG_INIT_TABLE[i][1]);\n }\n}\n\nvoid accelerometer_get_orientation(accelerometer_output_t * out)\n{\n memset(out, 0, sizeof(accelerometer_output_t));\n\n out->x |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_X_H)) << 8;\n out->x |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_X_L));\n\n out->y |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_Y_H)) << 8;\n out->y |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_Y_L));\n\n out->z |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_Z_H)) << 8;\n out->z |= ((uint16_t)lis2hh12_read_register(LIS2HH12_REG_OUT_Z_L));\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n" }, { "alpha_fraction": 0.4389522671699524, "alphanum_fraction": 0.4528939723968506, "avg_line_length": 29.727272033691406, "blob_id": "cc953ec7c2ca2167d36aad19b9173e4c163262fe", "content_id": "0d0f05ca552354cc1fcbd2bb925962a966c5f5bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2367, "license_type": "no_license", "max_line_length": 81, "num_lines": 77, "path": "/life_jacket.X/inc/hal/spi_hal.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef SPI_HAL_H\n#define SPI_HAL_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef enum\n{\n\tSPI_DEVICE_NULL,\n SPI_DEVICE_RFM95W,\n SPI_DEVICE_LIS2HH12,\n SPI_DEVICE_PCM1770,\n SPI_DEVICE_EXT_FLASH\n} spi_hal_device_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Sets up the SPI interface for communication with one external device.\n @param device - Device to set up the SPI interface for.\n*/\nvoid spi_hal_setup_for_device(spi_hal_device_t device);\n\n/**\n @brief Performs one 16bit spi session.\n @details This is a blocking operation, and takes control over the CS pin.\n @param v - Value to transmit.\n @return Received value.\n*/\nuint16_t spi_hal_tranceive16(uint16_t v);\n\n/**\n @brief Performs one 8bit spi session.\n @details This is a 
blocking operation, and does no take control over the CS pin.\n @param v - Value to transmit.\n @return Received value.\n*/\nuint8_t spi_hal_tranceive8(uint8_t v);\n\n/**\n @brief Performs a series of 16bit spi transmissions.\n @details This is a blocking operation, and does no take control over the CS pin.\n 0x00 is sent while receiving.\n Length must be a multiple of 4.\n @param read_data - Where to store the read data.\n @param length - Number of bytes in the transmission.\n*/\nvoid spi_hal_read16_block(uint16_t * read_data,\n uint16_t length);\n \n#ifdef __cplusplus\n}\n#endif\n\n#endif /* SPI_HAL_H */\n\n" }, { "alpha_fraction": 0.4134555757045746, "alphanum_fraction": 0.42150911688804626, "avg_line_length": 28.672794342041016, "blob_id": "833f1cf6b759b3f56e8b7bffa2496f001bd7e23b", "content_id": "3d09395b9f11ae8b3f8c9051eb00ab739e54e2a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 8073, "license_type": "no_license", "max_line_length": 111, "num_lines": 272, "path": "/FlashLoader/Unit.cs", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Threading.Tasks;\nusing System.IO;\nusing System.IO.Ports;\nusing System.Globalization;\nusing System.Threading;\nusing System.ComponentModel;\n\nnamespace FlashLoader\n{\n class Unit\n {\n private SerialPort LpuartInterface = null;\n private int BAUD_RATE = 38400;\n private bool open = false;\n private int CONFIG_SIZE = 8388608;\n private int PAGE_SIZE = 256;\n private BackgroundWorker worker;\n\n\n\n\n\n /**\n * @brief Initialize the computer interface\n */\n public Unit(string PortName, BackgroundWorker worker)\n {\n string retVal = \"\";\n this.worker = worker;\n\n try\n {\n LpuartInterface = new SerialPort(PortName, BAUD_RATE);\n LpuartInterface.ReadTimeout = 5000; // ms\n LpuartInterface.Open();\n LpuartInterface.NewLine = \"\\r\\n\";\n LpuartInterface.WriteLine(\"set sleep allowed off\");\n retVal = LpuartInterface.ReadLine();\n retVal = LpuartInterface.ReadLine();\n open = retVal.Contains(\"ok\") || retVal.Contains(\"Invalid syntax\");\n\n if (!open)\n {\n LpuartInterface.Close();\n }\n else\n {\n LpuartInterface.WriteLine(\"set debug log enable off\");\n retVal = LpuartInterface.ReadLine();\n retVal = LpuartInterface.ReadLine();\n }\n }\n catch (Exception)\n {\n try\n {\n LpuartInterface.Close();\n }\n catch (Exception)\n { }\n\n open = false;\n }\n }\n\n ~Unit()\n {\n try\n {\n LpuartInterface.Close();\n }\n catch (Exception)\n { }\n }\n\n public void Close()\n {\n open = false;\n\n try\n {\n LpuartInterface.Close();\n }\n catch (Exception)\n { }\n }\n\n /**\n * @brief Checks if the comunication to the PCB is opened.\n */\n public bool isOpen()\n {\n return open;\n }\n\n /**\n * @brief Reads the configuration.\n * @param FileName - where to store the read config.\n */\n public void ReadConfig(string FileName)\n {\n List<byte> config = new List<byte>();\n int reportInterval = 4;\n int nextProgressReport = reportInterval;\n worker.ReportProgress(0);\n\n try\n {\n using (FileStream stream = File.Open(FileName, FileMode.Create))\n using (BinaryWriter writer = new BinaryWriter(stream))\n {\n for (int i = 0; i != CONFIG_SIZE; ++i)\n {\n Byte readVal = 0;\n string returnVal;\n LpuartInterface.WriteLine(\"get flash \" + String.Format(\"{0:X}\", i));\n returnVal = LpuartInterface.ReadLine();\n returnVal = LpuartInterface.ReadLine();\n Byte.TryParse(returnVal, 
NumberStyles.HexNumber, null as IFormatProvider, out readVal);\n writer.Write(readVal);\n returnVal = LpuartInterface.ReadLine(); // \"ok\"\n\n if (i == nextProgressReport)\n {\n worker.ReportProgress((i * 100) / CONFIG_SIZE);\n nextProgressReport += reportInterval;\n }\n }\n }\n }\n catch (TimeoutException)\n {\n Close();\n }\n }\n\n private bool send_ef_sbp_command(int address, UInt16 value)\n {\n int tryNumber = 0;\n bool commandDone = false;\n \n while (!commandDone && (tryNumber++ < 10))\n {\n try\n {\n string dummy;\n LpuartInterface.WriteLine(\"ef spb \" + String.Format(\"{0:X} {1:X}\", address, value));\n dummy = LpuartInterface.ReadLine();\n dummy = LpuartInterface.ReadLine();\n commandDone = true;\n }\n catch\n {\n ;\n }\n\n if (!commandDone)\n {\n string dummy;\n LpuartInterface.WriteLine(\"restart uart\");\n dummy = LpuartInterface.ReadLine();\n dummy = LpuartInterface.ReadLine();\n }\n }\n\n return commandDone;\n }\n\n private bool send_ef_wp_command(int address)\n {\n int tryNumber = 0;\n bool commandDone = false;\n\n while (!commandDone && (tryNumber++ < 10))\n {\n try\n {\n string dummy;\n LpuartInterface.WriteLine(\"ef wp \" + String.Format(\"{0:X}\", address));\n dummy = LpuartInterface.ReadLine();\n dummy = LpuartInterface.ReadLine();\n commandDone = true;\n }\n catch\n {\n ;\n }\n\n if (!commandDone)\n {\n string dummy;\n LpuartInterface.WriteLine(\"restart uart\");\n dummy = LpuartInterface.ReadLine();\n dummy = LpuartInterface.ReadLine();\n }\n }\n\n return commandDone;\n }\n\n /**\n * @brief Writes a configuration to the PCB.\n * @param FileName - the configuration to be written.\n */\n public void WriteConfig(string FileName)\n {\n byte[] config;\n string dummy;\n int reportInterval = 4;\n int nextProgressReport = reportInterval;\n int chip_erase_progress = 10;\n worker.ReportProgress(0);\n\n try\n {\n\n using (FileStream stream = File.Open(FileName, FileMode.Open))\n using (BinaryReader reader = new BinaryReader(stream))\n {\n config = reader.ReadBytes(CONFIG_SIZE);\n }\n\n LpuartInterface.WriteLine(\"ext flash chip erase\\r\\n\");\n\n bool chip_erase_done = false;\n\n while (!chip_erase_done)\n {\n try\n {\n chip_erase_done = LpuartInterface.ReadLine().Contains(\"ok\");\n } catch (Exception) {;};\n }\n\n worker.ReportProgress(chip_erase_progress);\n\n for (int i = 0; i < CONFIG_SIZE; i += PAGE_SIZE)\n {\n for (int k = 0; k != PAGE_SIZE; k += 2)\n {\n UInt32 address = (UInt32)(i + k);\n\n UInt16 value = (UInt16)(((UInt16)config[address]) * 256);\n value += (UInt16)config[address + 1];\n\n\n if (!send_ef_sbp_command(k / 2, value))\n {\n throw new TimeoutException();\n }\n }\n\n if (!send_ef_wp_command(i))\n {\n throw new TimeoutException();\n }\n \n \n worker.ReportProgress(chip_erase_progress + (i * 100) / CONFIG_SIZE);\n }\n }\n catch (TimeoutException)\n {\n Close();\n }\n }\n }\n}\n" }, { "alpha_fraction": 0.45685070753097534, "alphanum_fraction": 0.46175870299339294, "avg_line_length": 24.45833396911621, "blob_id": "6a13df4ea7a2d8960a552705fa6a507c8b193895", "content_id": "8e623781e0017b706de0e1cbe9fab1ce868b563a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2445, "license_type": "no_license", "max_line_length": 80, "num_lines": 96, "path": "/life_jacket.X/inc/status.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef STATUS_H\n#define\tSTATUS_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// 
=============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef enum\n{\n // Error flag\n STATUS_CRITICAL_ERROR_FLAG,\n\n // Event flags\n STATUS_UART_RECEIVE_FLAG,\n STATUS_GPS_ON_EVENT,\n STATUS_GPS_BROADCAST_EVENT,\n STATUS_GPS_HOTSTART_EVENT,\n STATUS_AUDIO_BUFFER_UPDATE_EVENT,\n\n // Status runtime values\n STATUS_BYTE_LAST\n} status_byte_index_t;\n\ntypedef enum\n{\n CRIT_ERR_NO_ERROR = 0,\n} critical_error_t;\n\ntypedef uint8_t status_item_t;\n \n// =============================================================================\n// Global variable declarations\n// =============================================================================\n\nextern volatile status_item_t status_bytes[STATUS_BYTE_LAST];\n\n// =============================================================================\n// Global constatants\n// =============================================================================\n\n#define STATUS_PERIPHERAL_FREQ 16000000\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n * @brief Inititalizes the status variables.\n */\nvoid status_init(void);\n\n/**\n * @brief Checks the value of one status item.\n * @param index - index of the item to check.\n * @return status item to check.\n */\nstatic inline status_item_t status_check(status_byte_index_t index)\n{\n return status_bytes[index];\n}\n\n/**\n * @brief Sets the value of one status item.\n * @param index - index of the item to set the value of.\n * @param val - new value to assign the chosen status item.\n */\nstatic inline void status_set(status_byte_index_t index, status_item_t val)\n{\n status_bytes[index] = val;\n}\n\n/**\n * @brief Sets the value of one status item to 0.\n * @param index - index of the item to set the value of.\n */\nstatic inline void status_clear(status_byte_index_t index)\n{\n status_bytes[index] = 0;\n}\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* STATUS_H */\n\n" }, { "alpha_fraction": 0.4332306683063507, "alphanum_fraction": 0.4624026119709015, "avg_line_length": 26.46268653869629, "blob_id": "c19a7868b2355ddfe30f8e10d0a1cd388c7123fe", "content_id": "f23eeff8814d21d239d8b29af18b47344b1529fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5519, "license_type": "no_license", "max_line_length": 80, "num_lines": 201, "path": "/life_jacket.X/src/hal/clock.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"hal/clock.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n\n#include <xc.h>\n\n#include \"status.h\"\n\n#include \"uart/debug_log.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// 
=============================================================================\n// Global variables\n// =============================================================================\n\nvolatile uint16_t g_clock_gps_on_event_timeout;\nvolatile uint16_t g_clock_gps_off_timeout;\nvolatile uint16_t g_clock_gps_hot_start_timeout_sec;\nvolatile uint16_t g_clock_gps_broadcast_timeout_sec;\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n//\n// HW constants\n//\n\nstatic uint32_t TIMER_CLOCK_FREQ_HZ = CLOCK_HAL_PCBCLOCK_FREQ;\nstatic uint32_t MSEC_TIMER_FREQ_HZ = 1000;\n\n\n#define TCKPS_ONE_TO_ONE_PRESCALE 0\n#define TCKPS_ONE_TO_EIGHT_PRESCALE 1\n#define TCKPS_ONE_TO_SIXTYFOUR_PRESCALE 2\n#define TCKPS_ONE_TO_TWOHUNDRED_AND_FIFTYSIX_PRESCALE 3\n\n#define TCS_INTERNAL_CLOCK 0\n\ntypedef enum\n{\n CLOCK_RTCC_POINTER_YEAR = 0x3,\n CLOCK_RTCC_POINTER_MONTH_DAY = 0x2,\n CLOCK_RTCC_POINTER_WEEKDAY_HOUR = 0x1,\n CLOCK_RTCC_POINTER_MINUTES_SECONDS = 0x0,\n} clock_rtcc_pointer_t;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic volatile uint32_t current_time = 0;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid clock_start_msec_timer(void)\n{\n uint32_t counts;\n\n T1CON = 0x0000;\n T1CONbits.TON = 1;\n\n counts = TIMER_CLOCK_FREQ_HZ / MSEC_TIMER_FREQ_HZ;\n\n if ((counts / 1) < UINT16_MAX)\n {\n T1CONbits.TCKPS = TCKPS_ONE_TO_ONE_PRESCALE;\n PR1 = counts / 1;\n }\n else if ((counts / 8) < UINT16_MAX)\n {\n T1CONbits.TCKPS = TCKPS_ONE_TO_EIGHT_PRESCALE;\n PR1 = counts / 8;\n }\n else if ((counts / 64) < UINT16_MAX)\n {\n T1CONbits.TCKPS = TCKPS_ONE_TO_SIXTYFOUR_PRESCALE;\n PR1 = counts / 64;\n }\n else\n {\n T1CONbits.TCKPS = TCKPS_ONE_TO_TWOHUNDRED_AND_FIFTYSIX_PRESCALE;\n PR1 = counts / 256;\n }\n\n T1CONbits.TCS = TCS_INTERNAL_CLOCK;\n\n IEC0bits.T1IE = 1; // Enable interrupt generation of timer 1\n IPC0bits.T1IP = 4; // Set isr priority\n}\n\nuint32_t clock_get_msec(void)\n{\n volatile uint32_t t1;\n volatile uint32_t t2;\n\n // Avoid incorrect readings of current_time due to preemption by T1 isr.\n do\n {\n t1 = current_time;\n t2 = current_time;\n }\n while (t1 != t2);\n\n return t1;\n}\n\nvoid clock_start_rtc(void)\n{\n __builtin_write_RTCWEN();\n\n RTCPWCbits.RTCLK = 0x01; // RTCLK = internal LPRC osc\n\n ALCFGRPTbits.ALRMEN = 0; // Alarm disable\n ALCFGRPTbits.AMASK = 0x01; // Alarm every second\n ALCFGRPTbits.ARPT = 255; // Alarm repeat count\n ALCFGRPTbits.CHIME = 1; // Repeat alarm indefinitely\n\n RCFGCALbits.RTCPTR = CLOCK_RTCC_POINTER_YEAR;\n RTCVAL = 0x0000; // Year = 2000\n RTCVAL = 0x0101; // Month = 1, day = 1\n RTCVAL = 0x0000; // Sunday, hour = 0\n RTCVAL = 0x0000; // Minute = 0, seconds = 0\n\n ALCFGRPTbits.ALRMPTR = CLOCK_RTCC_POINTER_MONTH_DAY;\n ALRMVAL = 0x0101; // Month = 1, day = 1\n ALRMVAL = 0x0000; // Sunday, hour = 0\n ALRMVAL = 0x0002; // Minute 0, seconds = 2\n\n IFS3bits.RTCIF = 0;\n IEC3bits.RTCIE = 1;\n\n ALCFGRPTbits.ALRMEN = 1; // 
Alarm enable\n RCFGCALbits.RTCEN = 1; // RTCC module enable\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\n\nvoid __attribute__((interrupt, no_auto_psv)) _RTCCInterrupt(void)\n{\n IFS3bits.RTCIF = 0;\n\n if (g_clock_gps_hot_start_timeout_sec)\n {\n --g_clock_gps_hot_start_timeout_sec;\n\n if (!g_clock_gps_hot_start_timeout_sec)\n {\n status_set(STATUS_GPS_HOTSTART_EVENT, true);\n }\n }\n\n if (g_clock_gps_broadcast_timeout_sec)\n {\n --g_clock_gps_broadcast_timeout_sec;\n\n if (!g_clock_gps_broadcast_timeout_sec)\n {\n status_set(STATUS_GPS_BROADCAST_EVENT, true);\n }\n }\n}\n\nvoid __attribute__((interrupt, no_auto_psv)) _T1Interrupt(void)\n{\n IFS0bits.T1IF = 0;\n\n if (g_clock_gps_on_event_timeout)\n {\n --g_clock_gps_on_event_timeout;\n\n if (!g_clock_gps_on_event_timeout)\n {\n status_set(STATUS_GPS_ON_EVENT, true);\n }\n }\n\n if (g_clock_gps_off_timeout)\n {\n --g_clock_gps_off_timeout;\n }\n\n ++current_time;\n}" }, { "alpha_fraction": 0.41883400082588196, "alphanum_fraction": 0.5687729120254517, "avg_line_length": 35.72435760498047, "blob_id": "547f4bb49be03c749493b4c69567912b79baf59f", "content_id": "72d0d05cf7dfe49911d3784b23eafde503229002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11458, "license_type": "no_license", "max_line_length": 110, "num_lines": 312, "path": "/life_jacket.X/src/hal/flash.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\n * This file allows the use of a part of the instruction memory to be used\n * as non volatile data memory.\n *\n * In this implemententation only the 16 LSB of each instruction (24 bits) will\n * be used for data storage. This of course consumes more memory, but speeds\n * up flash address calculation wich in turn improves read speeds significantly.\n *\n * One instruction in the flash memory:\n *\n * -------------------------------------------------------------\n * | Used as data memory | Unused |\n * -------------------------------------------------------------\n * BIT0 - BIT15 BIT16 - BIT23\n *\n *\n * References:\n * Document number: DS30009715C, PIC24F Flash Program Memory\n * Document number: DS39715A, Section 4. 
Program Memory\n * Document number: DS39747D, PIC24FJ128GA010 Family Data Sheet\n * Document number: DS51284F, MPLAB C30 C COMPILER USER?S GUIDE\n * \n */\n\n\n\n\n// =============================================================================\n// Include statements\n// =============================================================================\n#include \"hal/flash.h\"\n\n#include <stdbool.h>\n#include <stdint.h>\n\n#include <xc.h>\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\n#define INSTRUCTIONS_PER_ROW 64\n#define ROWS_PER_ERASE_BLOCK 8\n#define INSTRUCTIONS_PER_ERASE_BLOCK 512\n#define WORDS_PER_ERASE_BLOCK (INSTRUCTIONS_PER_ERASE_BLOCK * 2)\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic volatile uint8_t buffer[FLASH_MEM_SIZE];\n\nconst uint16_t flash_data[WORDS_PER_ERASE_BLOCK] __attribute__((space(prog),aligned(WORDS_PER_ERASE_BLOCK))) =\n{\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0000\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0008\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0010\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0018\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0020\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0028\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0030\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0038\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0040\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0048\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0050\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0058\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0060\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0068\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0070\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0078\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0080\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0088\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0090\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x0098\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00A0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00A8\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00B0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00B8\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00C0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00C8\n 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, // 0x00D0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00D8\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00E0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00E8\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00F0\n 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 // 0x00F8\n};\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n\n/**\n * @brief Erases the address page used for data.\n */\nstatic void erase_flash_data(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid flash_init(void)\n{\n ;\n}\n\nuint8_t flash_read_byte(flash_index_t index)\n{\n uint16_t data;\n uint8_t ret_val;\n data = flash_read_word(index & ~0x0001);\n\n if (index & 0x0001)\n {\n ret_val = (uint8_t)data;\n }\n else\n {\n ret_val = (uint8_t)(data >> 8);\n }\n\n return ret_val;\n}\n\nuint16_t flash_read_word(flash_index_t index)\n{\n uint16_t addr_offset;\n uint16_t read_word;\n uint16_t index_offset = (uint16_t)index & 0xFFFE;\n bool odd_addr = (0 != ((uint16_t)index & 0x0001));\n\n TBLPAG = __builtin_tblpage(flash_data);\n addr_offset = __builtin_tbloffset(flash_data) + index_offset;\n asm(\"tblrdl.w [%1], %0\" : \"=r\"(read_word) : \"r\"(addr_offset));\n\n if (odd_addr)\n {\n uint16_t high_word;\n\n index_offset += 1;\n TBLPAG = __builtin_tblpage(flash_data);\n addr_offset = __builtin_tbloffset(flash_data) + index_offset;\n asm(\"tblrdl.w [%1], %0\" : \"=r\"(high_word) : \"r\"(addr_offset));\n\n read_word = (read_word << 8) | (0x00FF & (high_word >> 8));\n }\n\n return read_word;\n}\n\nuint32_t flash_read_dword(flash_index_t index)\n{\n uint32_t dword = 0x00000000;\n\n dword |= ((uint32_t)flash_read_word(index) << 16) & (uint32_t)0xFFFF0000;\n dword |= (uint32_t)flash_read_word(index + 2) & (uint32_t)0x0000FFFF;\n\n return dword;\n}\n\nvoid flash_init_write_buffer(void)\n{\n uint16_t i;\n\n for (i = 0; i != FLASH_MEM_SIZE; i += 2)\n {\n uint16_t d = flash_read_word(i);\n buffer[i] = d >> 8;\n buffer[i + 1] = d;\n }\n}\n\nvoid flash_write_byte_to_buffer(flash_index_t index, uint8_t data)\n{\n buffer[index] = data;\n}\n\nvoid flash_write_word_to_buffer(flash_index_t index, uint16_t data)\n{\n buffer[index] = (uint8_t)((data >> 8) & 0xFF);\n buffer[index + 1] = (uint8_t)( data & 0xFF);\n}\n\nvoid flash_write_dword_to_buffer(flash_index_t index, uint32_t data)\n{\n buffer[index] = (uint8_t)((data >> 24) & 0xFF);\n buffer[index + 1] = (uint8_t)((data >> 16) & 0xFF);\n buffer[index + 2] = (uint8_t)((data >> 8 ) & 0xFF);\n buffer[index + 3] = (uint8_t)( data & 0xFF);\n}\n\n/*\n * From DS39715A-page 4-17:\n *\n\nThe user can program one row of program Flash memory at a time. To do this, it is necessary to\nerase the 8-row erase block containing the desired row. The general process is:\n1. Read eight rows of program memory (512 instructions) and store in data RAM.\n2. Update the program data in RAM with the desired new data.\n3. Erase the block:\na) Set the NVMOP bits (NVMCOM<3:0>) to ?0010? to configure for block erase. 
Set the\nERASE (NVMCOM<6>) and WREN (NVMCOM<14>) bits.\nb) Write the starting address of the block to be erased into the TBLPAG and W registers.\nc) Write 55h to NVMKEY.\nd) Write AAh to NVMKEY.\ne) Set the WR bit (NVMCOM<15>). The erase cycle begins and the CPU stalls for the\nduration of the erase cycle. When the erase is done, the WR bit is cleared automatically.\n4. Write the first 64 instructions from data RAM into the program memory buffers (see Section 4.5\n?Program Memory Writes?).\n5. Write the program block to Flash memory:\na) Set the NVMOP bits to ?0001? to configure for row programming. Clear the ERASE\nbit and set the WREN bit.\nb) Write 55h to NVMKEY.\nc) Write AAh to NVMKEY.\nd) Set the WR bit. The programming cycle begins and the CPU stalls for the duration of the\nwrite cycle. When the write to Flash memory is done, the WR bit is cleared automatically.\n6. Repeat steps 4 and 5, using the next available 64 instructions from the block in data RAM\nby incrementing the value in TBLPAG, until all 512 instructions are written back to Flash\nmemory.\nFor protection against accidental operations, the write initiate sequence for NVMKEY must be\nused to allow any erase or program operation to proceed. After the programming command has\nbeen executed, the user must wait for the programming time until programming is complete. The\ntwo instructions following the start of the programming sequence should be NOPs, as shown in\nSection 4.6.4.2 ?NVMKEY Register?.\n\n */\nvoid flash_write_buffer_to_flash(void)\n{\n uint16_t row;\n uint16_t instr;\n uint16_t offset;\n uint16_t buffer_index;\n\n erase_flash_data();\n\n // Memory row program operation (ERASE = 0) or no operation (ERASE = 1)\n NVMCONbits.NVMOP = 1;\n NVMCONbits.ERASE = 0;\n NVMCONbits.WREN = 1;\n\n TBLPAG = __builtin_tblpage(flash_data);\n offset = __builtin_tbloffset(flash_data);\n buffer_index = 0;\n\n for (row = 0; row != ROWS_PER_ERASE_BLOCK; ++row)\n {\n uint16_t row_offset = row * (uint16_t)INSTRUCTIONS_PER_ROW * 2;\n \n for (instr = 0; instr != INSTRUCTIONS_PER_ROW; ++instr)\n {\n uint16_t low_word;\n uint8_t dummy_data = 0x00;\n uint16_t addr = offset + (instr * 2) + row_offset;\n\n low_word = (uint16_t)buffer[buffer_index++] << 8;\n low_word |= (uint16_t)buffer[buffer_index++];\n\n __builtin_tblwtl(addr, low_word);\n __builtin_tblwth(addr, dummy_data);\n }\n __builtin_disi(5);\n __builtin_write_NVM();\n\n while (NVMCONbits.WR)\n {\n ;\n }\n } \n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\n/**\n * Reference: DS30009715C-page 16\n */\nstatic void erase_flash_data(void)\n{\n uint16_t addr_offset;\n\n NVMCONbits.NVMOP = 0x2; // Erase block\n\n // Perform the erase operation specified by NVMOP3:NVMOP0 on the next WR command\n NVMCONbits.ERASE = 1;\n\n // Enable Flash program/erase operations\n NVMCONbits.WREN = 1;\n\n // Set up the address\n TBLPAG = __builtin_tblpage(flash_data);\n addr_offset = __builtin_tbloffset(flash_data);\n __builtin_tblwtl(addr_offset, 0); // Dummy TBLWT to load address\n\n // Start sequence accoding to doc: DS39715A-page 4-16\n __builtin_disi(5);\n __builtin_write_NVM(); \n\n while (NVMCONbits.WR)\n {\n ;\n }\n}\n" }, { "alpha_fraction": 0.41172316670417786, "alphanum_fraction": 0.4176082909107208, "avg_line_length": 28.901409149169922, "blob_id": "9402db90ec0873d097a9b240f393835b38b8390d", "content_id": 
"e95022ff0464ba060e2fdbcd99f10e04937ffc3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4248, "license_type": "no_license", "max_line_length": 80, "num_lines": 142, "path": "/life_jacket.X/src/gps/gps.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include \"gps/gps.h\"\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n\n#include \"gps/nmea.h\"\n#include \"gps/nmea_queue.h\"\n#include \"gps/jf2_io.h\"\n#include \"gps/jf2_uart.h\"\n#include \"status.h\"\n#include \"hal/clock.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\ntypedef enum\n{\n GPS_STATE_COLD_START_PRE_FIX,\n GPS_STATE_COLD_START_POST_FIX,\n GPS_STATE_HOT_START_PRE_FIX,\n GPS_STATE_HOT_START_POST_FIX,\n GPS_STATE_IDLE\n} gps_state_t;\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\nstatic const uint16_t GPS_COLD_START_OFF_TIMEOUT_MS = 5000;\nstatic const uint16_t GPS_HOT_START_OFF_TIMEOUT_MS = 1000;\nstatic const uint16_t GPS_HOT_START_INTERVAL_SEC = 60;\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\nstatic gps_state_t gps_state;\n\nstatic nmea_coordinates_info_t coordinates;\nstatic bool coordinates_set = false;\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid gps_init(void)\n{\n gps_state = GPS_STATE_COLD_START_PRE_FIX;\n\n nmea_reset_on_lock_event();\n jf2_uart_init();\n nmea_queue_init(nmea_queue_get_rx_queue());\n nmea_queue_init(nmea_queue_get_tx_queue());\n g_clock_gps_on_event_timeout = JF2_IO_RTC_STARTUP_TIME_MS;\n}\n\nvoid gps_poll(void)\n{\n if (GPS_STATE_COLD_START_PRE_FIX == gps_state)\n {\n if (nmea_check_on_lock_event())\n {\n g_clock_gps_off_timeout = GPS_COLD_START_OFF_TIMEOUT_MS;\n gps_state = GPS_STATE_COLD_START_POST_FIX;\n }\n }\n if (GPS_STATE_COLD_START_POST_FIX == gps_state)\n {\n if (0 == g_clock_gps_off_timeout)\n {\n jf2_io_send_on_pulse();\n g_clock_gps_hot_start_timeout_sec = GPS_HOT_START_INTERVAL_SEC;\n gps_state = GPS_STATE_IDLE;\n }\n }\n else if (GPS_STATE_HOT_START_PRE_FIX == gps_state)\n {\n if (nmea_check_on_lock_event())\n {\n g_clock_gps_off_timeout = GPS_HOT_START_OFF_TIMEOUT_MS;\n gps_state = GPS_STATE_HOT_START_POST_FIX;\n }\n }\n else if (GPS_STATE_HOT_START_POST_FIX == gps_state)\n {\n if (0 == g_clock_gps_off_timeout)\n {\n nmea_get_coordinates(&coordinates);\n coordinates_set = true;\n jf2_io_send_on_pulse();\n 
g_clock_gps_hot_start_timeout_sec = GPS_HOT_START_INTERVAL_SEC;\n gps_state = GPS_STATE_IDLE;\n }\n }\n else if (GPS_STATE_IDLE == gps_state)\n {\n if (status_check(STATUS_GPS_HOTSTART_EVENT))\n {\n status_clear(STATUS_GPS_HOTSTART_EVENT);\n nmea_reset_on_lock_event();\n jf2_io_send_on_pulse();\n gps_state = GPS_STATE_HOT_START_PRE_FIX;\n }\n }\n}\n\nbool gps_allows_sleep_mode(void)\n{\n return (GPS_STATE_IDLE == gps_state);\n}\n\nconst nmea_coordinates_info_t * gps_get_coordinates(void)\n{\n if (coordinates_set)\n {\n return &coordinates;\n }\n else\n {\n return NULL; \n }\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\n\n" }, { "alpha_fraction": 0.5671796798706055, "alphanum_fraction": 0.5777277946472168, "avg_line_length": 45.079681396484375, "blob_id": "2d5914adf46744f22ee43e2401ec368c7373d6d1", "content_id": "dd9a140653acff477cb564fdfad0b06c5c858189", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 11566, "license_type": "no_license", "max_line_length": 307, "num_lines": 251, "path": "/life_jacket.X/src/uart/terminal_help.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\nThis file is an auto generated file.\nDo not modify its contents manually!\n*/\n#include <string.h>\n#include <stddef.h>\n#include \"hal/uart.h\"\nvoid terminal_help(char* in)\n{\n if (NULL != strstr(in, \"hello\"))\n {\n uart_write_string(\"\\tSay hi!\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"system reset\"))\n {\n uart_write_string(\"\\tForces a software reboot.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"init flash bufffer\"))\n {\n uart_write_string(\"\\tInitiates the flash write buffer with the contents of theflash data memory.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"buffered write\"))\n {\n uart_write_string(\"\\tWrites one byte to the flash buffer.\\n\\r\\tParamters: <index in hex format> <one byte value in hex format>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"flush flash buffer\"))\n {\n uart_write_string(\"\\tWrite the contents of the flash buffer to the flash memory.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"lora cw\"))\n {\n uart_write_string(\"\\tStarts a LORA CW transmission.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"lora gps broadcast\"))\n {\n uart_write_string(\"\\tStarts a LoRa GPS position broadcast.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"lora cont rx\"))\n {\n uart_write_string(\"\\tStarts continuous rx LoRa mode.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"gps on off pulse\"))\n {\n uart_write_string(\"\\tSends a on/off pulse to the GPS module.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"ext flash chip erase\"))\n {\n uart_write_string(\"\\tErases the whole external flash memory.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"ext flash write test\"))\n {\n uart_write_string(\"\\tWrites test data to the first page (first 
256 bytes).\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"ef spb\"))\n {\n uart_write_string(\"\\tSets two bytes in the page buffer. Word indexed.\\n\\r\\tParameters: <index in range [0, 127]> <value to set as 4 hex digits>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"ef wp\"))\n {\n uart_write_string(\"\\tWrites the page buffer to a page in the external flash memory.\\n\\r\\tParameters: <page address as 6 hex digits>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"test audio session\"))\n {\n uart_write_string(\"\\tRuns a audio session test.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"write audio test data\"))\n {\n uart_write_string(\"\\tWrites test audio data to the external flash memory.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"restart uart\"))\n {\n uart_write_string(\"\\tResets the debug UART module.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get flash\"))\n {\n uart_write_string(\"\\tGets one byte from the flash data memory.\\n\\r\\tParameter: <index in hex format>\\n\\r\\tReturns: <hex value of byte at specified index>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get ext flash\"))\n {\n uart_write_string(\"\\tGets one byte from the external flash memory.\\n\\r\\tParameter: <address in hex>\\n\\r\\tReturns: <read byte in hex format>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get page ext flash\"))\n {\n uart_write_string(\"\\tGets 256 of bytes from the external flash memory.\\n\\r\\tParameters: <start address in hex>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get gps status\"))\n {\n uart_write_string(\"\\tGets the status of the GPS.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get orientation\"))\n {\n uart_write_string(\"\\tGets the x, y, z values from the accelerometer.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"get lora address\"))\n {\n uart_write_string(\"\\tGets the LORA address.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set flash\"))\n {\n uart_write_string(\"\\tSets one byte in the flash data memory.\\n\\r\\tParamter: <index in hex format> <one byte value in hex format>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set gps echo\"))\n {\n uart_write_string(\"\\tEnabled/disables received GPS messages from being echoed onto the debug UART.\\n\\r\\tParamter: <'on' or 'off'>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora bw\"))\n {\n uart_write_string(\"\\tSets the LoRa channel bandwidth.\\n\\r\\tParameter: <bandwidth setting in range [0, 9]>\\n\\r\\t\\n\\r\\t0 = 7.8kHz\\n\\r\\t1 = 10.4kHz\\n\\r\\t2 = 15.6 kHz\\n\\r\\t3 = 20.8 kHz\\n\\r\\t4 = 31.25 kHz\\n\\r\\t5 = 41.7 kHz\\n\\r\\t6 = 62.5 kHz\\n\\r\\t7 = 125 kHz\\n\\r\\t8 = 250 kHz\\n\\r\\t9 = 500 kHz\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora cr\"))\n {\n uart_write_string(\"\\tSets the LoRa coding 
rate.\\n\\r\\tParameter: <coding rate setting in range[1, 4]>\\n\\r\\t\\n\\r\\t1 = coding rate 4/5\\n\\r\\t2 = coding rate 4/6\\n\\r\\t3 = coding rate 4/7\\n\\r\\t4 = coding rate 4/8\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora sf\"))\n {\n uart_write_string(\"\\tSets the LoRa spreading factor.\\n\\r\\tParameter: <spreading factor in range [6, 12]>\\n\\r\\t\\n\\r\\tA spreading factor of 'sf' gives 2^(sf) chips\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora freq\"))\n {\n uart_write_string(\"\\tSets the LoRa frequency.\\n\\r\\tParameter: <frequency band in range [1, 8]>\\n\\r\\t\\n\\r\\tBands:\\n\\r\\t1 = 868.1 MHz\\n\\r\\t2 = 868.3 MHz\\n\\r\\t3 = 868.5 MHz\\n\\r\\t4 = 867.1 MHz\\n\\r\\t5 = 867.3 MHz\\n\\r\\t6 = 867.5 MHz\\n\\r\\t7 = 867.7 MHz\\n\\r\\t8 = 867.9 MHz\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set sleep allowed\"))\n {\n uart_write_string(\"\\tEnables/disables sleep mode.\\n\\r\\tParamter: <'on' or 'off'>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set debug log enable\"))\n {\n uart_write_string(\"\\tEnables/disables the debug log.\\n\\r\\tParamter: <'on' or 'off'>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora to p2ps\"))\n {\n uart_write_string(\"\\tSets the LORA protocol to p2ps.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora to p2pc\"))\n {\n uart_write_string(\"\\tSets the LORA protocol to p2pc.\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else if (NULL != strstr(in, \"set lora address\"))\n {\n uart_write_string(\"\\tSets the LORA P2P address.\\n\\r\\tParameter: <address as 8 digit hex number>\\n\\r\\t\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n }\n else\n {\n uart_write_string(\"\\tType \\\"help <command>\\\" for more info\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"\\tAvailible commands:\\n\\r\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"\\t------------------------------------\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"buffered write\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"ef spb\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"ef wp\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"ext flash chip erase\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"ext flash write test\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"flush flash buffer\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get ext flash\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get flash\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get gps status\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get lora address\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get orientation\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"get page ext flash\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"gps on off pulse\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n 
uart_write_string(\"hello\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"init flash bufffer\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"lora cont rx\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"lora cw\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"lora gps broadcast\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"restart uart\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set debug log enable\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set flash\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set gps echo\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora address\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora bw\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora cr\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora freq\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora sf\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora to p2pc\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set lora to p2ps\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"set sleep allowed\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"system reset\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"test audio session\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"write audio test data\\n\\r\\t\");\n while (!uart_is_write_buffer_empty()){;}\n uart_write_string(\"\\n\\r\");\n }\n}\n" }, { "alpha_fraction": 0.5764074921607971, "alphanum_fraction": 0.5844504237174988, "avg_line_length": 33, "blob_id": "8c8b1098f1b524e384ae483716cb93edc0eb8c7a", "content_id": "a539831d4555a56fbd9fefd9ac5dbcdf6097c2b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 91, "num_lines": 11, "path": "/life_jacket.X/increment_build_number.py", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "if __name__ == \"__main__\":\n with open(\"inc/build_number.h\") as src_file:\n lines = src_file.readlines()\n\n current_version = int(lines[0].split('\"')[1])\n\n with open(\"inc/build_number.h\", 'w') as f:\n print('#define BUILD_NUMBER_STRING \"' + str(current_version + 1) + '\"\\r\\n', file=f)\n\n print(current_version)\n print(\"Build number incremented\")" }, { "alpha_fraction": 0.3285371661186218, "alphanum_fraction": 0.3567146360874176, "avg_line_length": 25.887096405029297, "blob_id": "4ac0d6d9a2707fe5cfefb00d0efa696a76218e71", "content_id": "283ced7638e261fc80d8528e44509e46e9096f47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1668, "license_type": "no_license", "max_line_length": 80, "num_lines": 62, "path": "/life_jacket.X/inc/audio/pcm1770.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef PCM1770_H\n#define PCM1770_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// 
=============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Initializes the PCM1770 chip.\n*/\nvoid pcm1770_init(void);\n\n/**\n @brief Shuts down the PCM1770 chip and its I2S bus.\n*/\nvoid pcm1770_deinit(void);\n\n/**\n @brief Writes a value to one of the PCM1770 registers.\n @param address - Register address to write to.\n @param value - Value to write into the register.\n*/\nvoid pcm1770_write_register(uint8_t address, uint8_t value);\n\n/**\n @brief Exits the power down mode.\n*/\nvoid pcm1770_power_up(void);\n\n/**\n @brief Enters the power down mode.\n*/\nvoid pcm1770_power_down(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* PCM1770_H */\n\n" }, { "alpha_fraction": 0.551563024520874, "alphanum_fraction": 0.5601846575737, "avg_line_length": 27.763200759887695, "blob_id": "9e7dfcb4f1111bb5a7142930b59e222b56dea803", "content_id": "06e2301824d0ad1c90c1e6e776c908d6f028964b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 17978, "license_type": "no_license", "max_line_length": 80, "num_lines": 625, "path": "/life_jacket.X/src/gps/nmea.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n#include <ctype.h>\n#include <stdlib.h>\n\n#include \"gps/nmea.h\"\n#include \"gps/nmea_queue.h\"\n\n#include \"hal/uart.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\ntypedef enum\n{\n NMEA_MODE_INDICATOR_AUTONOMOUS = 'A',\n NMEA_MODE_INDICATOR_DIFFERENTAL = 'D',\n NMEA_MODE_INDICATOR_ESTIMATED = 'E',\n NMEA_MODE_INDICATOR_MANUAL = 'M',\n NMEA_MODE_INDICATOR_SIMULATOR = 'S',\n NMEA_MODE_INDICATOR_DATA_NOT_VALID = 'N'\n} nmea_mode_indicator_t;\n\ntypedef enum\n{\n NMEA_GPS_QUALITY_INDICATOR_NO_FIX = '0',\n NMEA_GPS_QUALITY_INDICATOR_SPS_MODE = '1',\n NMEA_GPS_QUALITY_INDICATOR_DIFF_SPS_MODE = '2',\n NMEA_GPS_QUALITY_INDICATOR_PPS_MODE = '3',\n NMEA_GPS_QUALITY_INDICATOR_RT_KINEMATIC = '4',\n NMEA_GPS_QUALITY_INDICATOR_FLOAT_RTK = '5',\n NMEA_GPS_QUALITY_INDICATOR_DEAD_RECKONING = '6',\n NMEA_GPS_QUALITY_INDICATOR_MANUAL_INPUT = '7',\n NMEA_GPS_QUALITY_INDICATOR_SIMULATOR = '8'\n} nmea_gps_quality_indicator_t;\n\ntypedef enum\n{\n NMEA_RMC_FIELD_MNEMONIC = 0,\n NMEA_RMC_FIELD_UTC = 1,\n NMEA_RMC_FIELD_STATUS = 2,\n NMEA_RMC_FIELD_LATITUDE_VAL = 3,\n 
NMEA_RMC_FIELD_LATITUDE_NS = 4,\n NMEA_RMC_FIELD_LONGITUDE_VAL = 5,\n NMEA_RMC_FIELD_LONGITUDE_EW = 6,\n NMEA_RMC_FIELD_SPEED_OVER_GROUND = 7,\n NMEA_RMC_FIELD_COURSE_OVER_GROUND = 8,\n NMEA_RMC_FIELD_DATE = 9,\n NMEA_RMC_FIELD_MAGNETIC_VARIATION_DEG = 10,\n NMEA_RMC_FIELD_MAGNETIC_VARIATION_EW = 11,\n NMEA_RMC_FIELD_MODE_INDICATOR = 12\n} nmea_rmc_field_t;\n\ntypedef enum\n{\n NMEA_GGA_FIELD_MNEMONIC = 0,\n NMEA_GGA_FIELD_UTC = 1,\n NMEA_GGA_FIELD_LATITUDE_VAL = 2,\n NMEA_GGA_FIELD_LATITUDE_NS = 3,\n NMEA_GGA_FIELD_LONGITUDE_VAL = 4,\n NMEA_GGA_FIELD_LONGITUDE_EW = 5,\n NMEA_GGA_FIELD_GPS_QUALITY = 6,\n NMEA_GGA_FIELD_SATELLITE_COUNT = 7,\n NMEA_GGA_FIELD_HORIZONTAL_PRECISION = 8,\n NMEA_GGA_FIELD_ALTITUDE = 9,\n NMEA_GGA_FIELD_ALTITUDE_METERS = 10,\n NMEA_GGA_FIELD_GEOIDAL_SEPARATION = 11,\n NMEA_GGA_FIELD_GEOIDAL_SEPARATION_METERS= 12,\n NMEA_GGA_FIELD_AGE_OF_DIFF_GPS_DATA = 13,\n NMEA_GGA_FIELD_DIFF_REF_STATION_ID = 14\n} nmea_gga_field_t;\n\ntypedef enum\n{\n NMEA_GSV_FIELD_MNEMONIC = 0,\n NMEA_GSV_FIELD_NUMBER_OF_SENTENCES = 1,\n NMEA_GSV_FIELD_SATELLITES_IN_VIEW = 2,\n} nmea_gsv_field_t;\n\n#define LATITUDE_STR_LEN (11)\n#define LONGITUDE_STR_LEN (11)\n\ntypedef struct nmea_position_info_t\n{\n char latitude[LATITUDE_STR_LEN];\n char latitude_ns;\n char longitude[LONGITUDE_STR_LEN];\n char longitude_ew;\n char time_of_fix[9];\n} nmea_position_info_t;\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n\nstatic const char NMEA_START_CHAR = '$';\nstatic const char NMEA_CHECKSUM_CHAR = '*';\nstatic const char NMEA_FIELD_SEPERATOR = ',';\n\nstatic const char NMEA_STATUS_LOCKED = 'A';\nstatic const char NMEA_STATUS_WARNING = 'V';\n\n#define NMEA_MNEMONIC_CODE_LEN (3)\n#define NMEA_UTC_FIELD_LEN (9)\n\n#define NMEA_RMC_FIELD_COUNT (13)\n#define NMEA_GGA_FIELD_COUNT (15)\n\n// =============================================================================\n// Private variables\n// =============================================================================\n\nstatic bool is_locked = false;\nstatic bool on_lock_event_flag = false;\nstatic nmea_position_info_t position_info;\n\nstatic nmea_mode_indicator_t mode_indicator;\nstatic nmea_gps_quality_indicator_t gps_quality_indicator;\nstatic uint8_t satellites_in_view;\n\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n/**\n @brief Checks if the checksum of a message is correct.\n @param message - Message which checksum to check.\n @return True if the checksum was correct.\n*/\nstatic bool nmea_is_checksum_ok(char * message);\n\n/**\n @brief Parses a NMEA output message.\n @param message - Message to parse.\n*/\nstatic void nmea_parse_message(char * message);\n\n/**\n @brief Returns a pointer to the start of the next field in a NMEA message.\n @param message - Start of the current NMEA field.\n @param message_end - End of message.\n*/\nstatic char * nmea_next_field(char * message, char * message_end);\n\n/**\n @brief Finds the end of a NMEA message.\n @param message - Message to find the end of.\n*/\nstatic char * nmea_find_end_of_message(char * message);\n\n/**\n @brief Handles 
a global positioning system fix data message.\n @param message - Message to handle.\n*/\nstatic void nmea_handle_gga_message(char * message);\n\n/**\n @brief Handles a GNSS DOP and active satillites in view message.\n @param message - Message to handle.\n*/\nstatic void nmea_handle_gsa_message(char * message);\n\n/**\n @brief Handles a Recommended minimum specific GNSS data message.\n @param message - Message to handle.\n*/\nstatic void nmea_handle_rmc_message(char * message);\n\n/**\n @brief Handles a GNSS satellites in view message.\n @param message - Message to handle.\n*/\nstatic void nmea_handle_gsv_message(char * message);\n\n/**\n @brief Parses a string with degrees and minutes.\n @param string - String to parse.\n @param degrees - Parsed degrees value.\n @param minutes - Parsed minutes value.\n*/\nstatic void nmea_parse_deg_minutes(const char * string,\n uint16_t * degrees,\n float * minutes);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid nmea_handle_message(char * message)\n{\n if (nmea_is_checksum_ok(message))\n {\n nmea_parse_message(message);\n }\n}\n\nvoid nmea_reset_on_lock_event(void)\n{\n on_lock_event_flag = 0;\n}\n\nbool nmea_check_on_lock_event(void)\n{\n return on_lock_event_flag;\n}\n\nvoid nmea_get_coordinates(nmea_coordinates_info_t * coordinates)\n{\n char hours_str[3];\n char minutes_str[3];\n char seconds_str[3];\n\n coordinates->latitude_north = ('N' == position_info.latitude_ns);\n nmea_parse_deg_minutes(position_info.latitude,\n &(coordinates->latitude_deg),\n &(coordinates->latitude_minutes));\n\n coordinates->longitude_east = ('E' == position_info.longitude_ew);\n nmea_parse_deg_minutes(position_info.longitude,\n &(coordinates->longitude_deg),\n &(coordinates->longitude_minutes));\n\n hours_str[0] = position_info.time_of_fix[0];\n hours_str[1] = position_info.time_of_fix[1];\n hours_str[2] = NULL;\n minutes_str[0] = position_info.time_of_fix[2];\n minutes_str[1] = position_info.time_of_fix[3];\n minutes_str[2] = NULL;\n seconds_str[0] = position_info.time_of_fix[4];\n seconds_str[1] = position_info.time_of_fix[5];\n seconds_str[2] = NULL;\n\n coordinates->time_of_fix_hours = (uint8_t)atoi(hours_str);\n coordinates->time_of_fix_minutes = (uint8_t)atoi(minutes_str);\n coordinates->time_of_fix_seconds = (uint8_t)atoi(seconds_str);\n}\n\nvoid nmea_print_status(void)\n{\n uart_write_string(\"\\tMode indicator: \");\n\n switch (mode_indicator)\n {\n case NMEA_MODE_INDICATOR_AUTONOMOUS:\n uart_write_string(\"AUTONOMOUS\");\n break;\n\n case NMEA_MODE_INDICATOR_DIFFERENTAL:\n uart_write_string(\"DIFFERENTAL\");\n break;\n\n case NMEA_MODE_INDICATOR_ESTIMATED:\n uart_write_string(\"ESTIMATED\");\n break;\n\n case NMEA_MODE_INDICATOR_MANUAL:\n uart_write_string(\"MANUAL\");\n break;\n\n case NMEA_MODE_INDICATOR_SIMULATOR:\n uart_write_string(\"SIMULATOR\");\n break;\n\n case NMEA_MODE_INDICATOR_DATA_NOT_VALID:\n uart_write_string(\"DATA NOT VALID\");\n break;\n }\n\n uart_write_string(\"\\r\\n\\tQuality indicator: \");\n\n switch (gps_quality_indicator)\n {\n case NMEA_GPS_QUALITY_INDICATOR_NO_FIX:\n uart_write_string(\"FIX NOT AVAILABLE OR INVALID\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_SPS_MODE:\n uart_write_string(\"SPS MODE, FIX VALID\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_DIFF_SPS_MODE:\n uart_write_string(\"DIFFERENTIAL GPS, SPS MODE, FIX VALID\");\n break;\n\n case 
NMEA_GPS_QUALITY_INDICATOR_PPS_MODE:\n uart_write_string(\"GPS PPS MODE, FIX VALID\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_RT_KINEMATIC:\n uart_write_string(\"REAL TIME KINEMATIC\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_FLOAT_RTK:\n uart_write_string(\"FLOAT REAL TIME KINEMATIC\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_DEAD_RECKONING:\n uart_write_string(\"ESTIMATED MODE\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_MANUAL_INPUT:\n uart_write_string(\"MANUAL INPUT MODE\");\n break;\n\n case NMEA_GPS_QUALITY_INDICATOR_SIMULATOR:\n uart_write_string(\"SIMULATOR MODE\");\n break;\n }\n\n uart_write_string(\"\\r\\n\\tLocked: \");\n\n if (is_locked)\n {\n uart_write_string(\"true\");\n }\n else\n {\n uart_write_string(\"false\");\n }\n\n uart_write_string(\"\\r\\n\\tLatitude: \");\n uart_write_string(position_info.latitude);\n uart_write_string(\" \");\n uart_write(position_info.latitude_ns);\n uart_write_string(\"\\r\\n\\tLongitude: \");\n uart_write_string(position_info.longitude);\n uart_write_string(\" \");\n uart_write(position_info.longitude_ew);\n uart_write_string(\"\\r\\n\\tTime of fix (UTC): \");\n uart_write_string(position_info.time_of_fix);\n uart_write_string(\"\\r\\n\");\n}\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nstatic bool nmea_is_checksum_ok(char * message)\n{\n bool checksum_ok = true;\n char * checksum_str;\n uint8_t checksum;\n\n if (NMEA_START_CHAR != message[0])\n {\n checksum_ok = false;\n }\n\n if (checksum_ok)\n {\n checksum_str = memchr(message,\n NMEA_CHECKSUM_CHAR,\n NMEA_MAX_MESSAGE_LENGTH - 2);\n\n checksum_ok = checksum_ok && (NULL != checksum_str);\n }\n\n if (checksum_ok)\n {\n checksum_ok = checksum_ok && isdigit(checksum_str[1]);\n checksum_ok = checksum_ok && isdigit(checksum_str[2]);\n }\n\n if (checksum_ok)\n {\n uint8_t calculated_checksum;\n\n char * m = message + 1;\n calculated_checksum = *m++;\n\n while (m != checksum_str)\n {\n calculated_checksum ^= *m++;\n }\n\n checksum = strtol(checksum_str + 1, NULL, 16);\n\n checksum_ok = (checksum == calculated_checksum);\n }\n\n return checksum_ok;\n}\n\nstatic void nmea_parse_message(char * message)\n{\n char mnemonic_code[NMEA_MNEMONIC_CODE_LEN + 1];\n\n mnemonic_code[0] = message[3];\n mnemonic_code[1] = message[4]; \n mnemonic_code[2] = message[5];\n mnemonic_code[3] = 0;\n\n if (0 == strcmp(mnemonic_code, \"GGA\"))\n {\n nmea_handle_gga_message(message);\n }\n else if (0 == strcmp(mnemonic_code, \"GSA\"))\n {\n nmea_handle_gsa_message(message);\n }\n else if (0 == strcmp(mnemonic_code, \"RMC\"))\n {\n nmea_handle_rmc_message(message);\n }\n else if (0 == strcmp(mnemonic_code, \"GSV\"))\n {\n nmea_handle_gsv_message(message);\n }\n}\n\nstatic char * nmea_next_field(char * message, char * message_end)\n{\n char * m;\n\n m = memchr(message, NMEA_FIELD_SEPERATOR, message_end - message + 1);\n\n return m + 1;\n}\n\nstatic char * nmea_find_end_of_message(char * message)\n{\n return memchr(message, NMEA_CHECKSUM_CHAR, NMEA_MAX_MESSAGE_LENGTH + 3);\n}\n\nstatic void nmea_handle_gga_message(char * message)\n{\n char * fields[NMEA_GGA_FIELD_COUNT];\n char * message_end = nmea_find_end_of_message(message);\n uint16_t i;\n\n fields[0] = message;\n\n for (i = 1; i != NMEA_GGA_FIELD_COUNT; ++i)\n {\n fields[i] = nmea_next_field(fields[i - 1], message_end);\n }\n\n gps_quality_indicator = *fields[NMEA_GGA_FIELD_GPS_QUALITY];\n\n 
is_locked =\n (NMEA_GPS_QUALITY_INDICATOR_SPS_MODE == gps_quality_indicator) ||\n (NMEA_GPS_QUALITY_INDICATOR_DIFF_SPS_MODE == gps_quality_indicator) ||\n (NMEA_GPS_QUALITY_INDICATOR_PPS_MODE == gps_quality_indicator);\n\n if (is_locked &&\n (NMEA_GPS_QUALITY_INDICATOR_SPS_MODE == gps_quality_indicator) &&\n (NMEA_MODE_INDICATOR_AUTONOMOUS == mode_indicator))\n {\n on_lock_event_flag = true; \n }\n\n if (is_locked)\n {\n uint16_t latitude_field_len;\n uint16_t longitude_field_len;\n\n latitude_field_len = fields[NMEA_GGA_FIELD_LATITUDE_VAL + 1] -\n fields[NMEA_GGA_FIELD_LATITUDE_VAL] - 1;\n\n memset(position_info.latitude, 0, LATITUDE_STR_LEN);\n memcpy(position_info.latitude,\n fields[NMEA_GGA_FIELD_LATITUDE_VAL],\n latitude_field_len);\n\n position_info.latitude_ns = *fields[NMEA_GGA_FIELD_LATITUDE_NS];\n\n longitude_field_len = fields[NMEA_GGA_FIELD_LONGITUDE_VAL + 1] -\n fields[NMEA_GGA_FIELD_LONGITUDE_VAL] - 1;\n\n memset(position_info.longitude, 0, LONGITUDE_STR_LEN);\n memcpy(position_info.longitude,\n fields[NMEA_GGA_FIELD_LONGITUDE_VAL],\n longitude_field_len);\n\n position_info.longitude_ew = *fields[NMEA_GGA_FIELD_LONGITUDE_EW];\n\n memcpy(position_info.time_of_fix,\n fields[NMEA_GGA_FIELD_UTC],\n NMEA_UTC_FIELD_LEN);\n }\n}\n\nstatic void nmea_handle_gsa_message(char * message)\n{\n // GNSS DOP and Active Satellites message\n ;\n}\n\nstatic void nmea_handle_rmc_message(char * message)\n{\n char * fields[NMEA_RMC_FIELD_COUNT];\n char * message_end = nmea_find_end_of_message(message);\n uint16_t i;\n\n fields[0] = message;\n\n for (i = 1; i != NMEA_RMC_FIELD_COUNT; ++i)\n {\n fields[i] = nmea_next_field(fields[i - 1], message_end);\n }\n\n is_locked = (NMEA_STATUS_LOCKED == *fields[NMEA_RMC_FIELD_STATUS]);\n\n if (is_locked &&\n (NMEA_GPS_QUALITY_INDICATOR_SPS_MODE == gps_quality_indicator) &&\n (NMEA_MODE_INDICATOR_AUTONOMOUS == mode_indicator))\n {\n on_lock_event_flag = true; \n }\n\n if (is_locked)\n {\n uint16_t latitude_field_len;\n uint16_t longitude_field_len;\n\n latitude_field_len = fields[NMEA_RMC_FIELD_LATITUDE_VAL + 1] -\n fields[NMEA_RMC_FIELD_LATITUDE_VAL] - 1;\n\n memset(position_info.latitude, 0, LATITUDE_STR_LEN);\n memcpy(position_info.latitude,\n fields[NMEA_RMC_FIELD_LATITUDE_VAL],\n latitude_field_len);\n\n position_info.latitude_ns = *fields[NMEA_RMC_FIELD_LATITUDE_NS];\n\n longitude_field_len = fields[NMEA_RMC_FIELD_LONGITUDE_VAL + 1] -\n fields[NMEA_RMC_FIELD_LONGITUDE_VAL] - 1;\n\n memset(position_info.longitude, 0, LONGITUDE_STR_LEN);\n memcpy(position_info.longitude,\n fields[NMEA_RMC_FIELD_LONGITUDE_VAL],\n longitude_field_len);\n\n position_info.longitude_ew = *fields[NMEA_RMC_FIELD_LONGITUDE_EW];\n\n memcpy(position_info.time_of_fix,\n fields[NMEA_RMC_FIELD_UTC],\n NMEA_UTC_FIELD_LEN);\n\n mode_indicator =\n (nmea_mode_indicator_t)*fields[NMEA_RMC_FIELD_MODE_INDICATOR];\n }\n}\n\nstatic void nmea_handle_gsv_message(char * message)\n{\n // GNSS Satellites in View message\n char * start_of_field;\n char * end_of_field;\n char * message_end;\n char * p_src;\n char * p_dst;\n char str[8];\n\n message_end = nmea_find_end_of_message(message);\n\n start_of_field = message;\n start_of_field = nmea_next_field(start_of_field, message_end);\n start_of_field = nmea_next_field(start_of_field, message_end);\n start_of_field = nmea_next_field(start_of_field, message_end);\n end_of_field = nmea_next_field(start_of_field, message_end);\n\n if (end_of_field - start_of_field >= 8)\n {\n return;\n }\n\n p_src = start_of_field;\n p_dst = str;\n\n while (p_src != 
end_of_field)\n {\n *p_dst++ = *p_src++;\n }\n\n p_dst = 0;\n\n if (strlen(str))\n {\n satellites_in_view = (uint8_t)atoi(str);\n }\n else\n {\n satellites_in_view = 0;\n }\n\n}\n\nstatic void nmea_parse_deg_minutes(const char * string,\n uint16_t * degrees,\n float * minutes)\n{\n const char * p;\n const char * minutes_str;\n char degrees_str[4];\n uint8_t degrees_len;\n uint8_t i;\n\n p = strchr(string, '.');\n degrees_len = p - string - 2;\n\n for (i = 0; i != degrees_len; ++i)\n {\n degrees_str[i] = string[i];\n }\n\n degrees_str[i] = 0;\n\n minutes_str = p - 2;\n\n *degrees = (uint8_t)atoi(degrees_str);\n *minutes = atof(minutes_str);\n}\n\n" }, { "alpha_fraction": 0.46763119101524353, "alphanum_fraction": 0.49534085392951965, "avg_line_length": 32.14634323120117, "blob_id": "60d67b6172bfec905da5ff79eefa97e49e73844e", "content_id": "a36703e2bce50ea72e61afffc9aadfd2eeaff156", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 4078, "license_type": "no_license", "max_line_length": 80, "num_lines": 123, "path": "/life_jacket.X/inc/audio/ext_flash.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef EXT_FLASH_H\n#define EXT_FLASH_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef enum\n{\n EXT_FLASH_CMD_READ = 0x03,\n EXT_FLASH_CMD_FAST_READ = 0x0B,\n EXT_FLASH_CMD_2READ = 0xBB,\n EXT_FLASH_CMD_DREAD = 0x3B,\n EXT_FLASH_CMD_4READ = 0xEB,\n EXT_FLASH_CMD_QREAD = 0x6B,\n EXT_FLASH_CMD_PAGE_PROGRAM = 0x02,\n EXT_FLASH_CMD_QUAD_PAGE_PROGRAM = 0x38,\n EXT_FLASH_CMD_SECTOR_ERASE = 0x20,\n EXT_FLASH_CMD_BLOCK_ERASE_32 = 0x52,\n EXT_FLASH_CMD_BLOCK_ERASE_64 = 0xD8,\n EXT_FLASH_CMD_CHIP_ERASE = 0x60,\n EXT_FLASH_CMD_READ_SFDP = 0x5A,\n EXT_FLASH_CMD_WRITE_ENABLE = 0x06,\n EXT_FLASH_CMD_WRITE_DISABLE = 0x04,\n EXT_FLASH_CMD_READ_STATUS = 0x05,\n EXT_FLASH_CMD_READ_CONFIG = 0x15,\n EXT_FLASH_CMD_WRITE_STATUS = 0x01,\n EXT_FLASH_CMD_SUSPEND = 0x75,\n EXT_FLASH_CMD_RESUME = 0x7A,\n EXT_FLASH_CMD_DEEP_POWER_DOWN = 0xB9,\n EXT_FLASH_CMD_SET_BURST_LENGTH = 0xC0,\n EXT_FLASH_CMD_READ_ID = 0x9F,\n EXT_FLASH_CMD_READ_ELECTRONIC_ID = 0xAB,\n EXT_FLASH_CMD_READ_MANUFACTURER = 0x90,\n EXT_FLASH_CMD_ENTER_SECURED_OTP = 0xB1,\n EXT_FLASH_CMD_EXIT_SECURED_OTP = 0xC1,\n EXT_FLASH_CMD_READ_SECURITY = 0x2B,\n EXT_FLASH_CMD_WRITE_SECURITY = 0x2F,\n EXT_FLASH_CMD_NOP = 0x00,\n EXT_FLASH_CMD_RESET_ENABLE = 0x66,\n EXT_FLASH_CMD_RESET_MEMORY = 0x99\n} ext_flash_command_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n#define EXT_FLASH_PAGE_LENGTH (256)\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Initializes 
the mx25r6435f chip.\n*/\nvoid ext_flash_init(void);\n\n/**\n @brief Reads a number of consecutive bytes from the flash memeory.\n @param destination - Where to store the read data.\n @param address - Address of first byte to read.\n @param length - Number of bytes to read. Must be multiple of 4.\n*/\nvoid ext_flash_read(void * destination, uint32_t address, uint32_t length);\n\n/**\n @brief Reads one byte from the external flash memory.\n @param address - Address to read from.\n @return Read byte.\n*/\nuint8_t ext_flash_read_byte(uint32_t address);\n\n/**\n @brief Reads one word from the external flash memory.\n @param address - Address to read from.\n @return Read word.\n*/\nuint16_t ext_flash_read_word(uint32_t address);\n\n/**\n @brief Reads one dword from the external flash memory.\n @param address - Address to read from.\n @return Read dword.\n*/\nuint32_t ext_flash_read_dword(uint32_t address);\n\n/**\n @brief Erases all data on the flash memory.\n*/\nvoid ext_flash_chip_erase(void);\n\n/**\n @brief Checks if a write is in progress.\n*/\nbool ext_flash_is_write_in_progress(void);\n\n/**\n @brief Programs a 256 byte data block.\n @param data - The data to program the flash with.\n @param address - Start address of the page to program.\n*/\nvoid ext_flash_program_page(const void * data, uint32_t address);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* EXT_FLASH_H */\n\n" }, { "alpha_fraction": 0.4327065348625183, "alphanum_fraction": 0.45372912287712097, "avg_line_length": 22.603490829467773, "blob_id": "a1d60c648a46adaafa551795ddfc9acf271684f1", "content_id": "eec6e96e2b9786dfc4822c4db4eeeedc0a96aa16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 9466, "license_type": "no_license", "max_line_length": 87, "num_lines": 401, "path": "/life_jacket.X/src/gps/jf2_uart.c", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "/*\n * This file handes the UART module.\n *\n * References:\n * - PIC24FJ64GA006 datasheet, document number DS39747D, page 139\n */\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <xc.h>\n\n#include <string.h>\n#include <stdint.h>\n#include <stdbool.h>\n\n#include \"gps/jf2_uart.h\"\n#include \"gps/nmea_queue.h\"\n#include \"hal/gpio.h\"\n#include \"hal/clock.h\"\n#include \"status.h\"\n\n#include \"hal/uart.h\"\n\n// =============================================================================\n// Private type definitions\n// =============================================================================\n\n// =============================================================================\n// Global variables\n// =============================================================================\n\n// =============================================================================\n// Private constants\n// =============================================================================\n#define BUFFER_SIZE ((uint16_t)256)\n#define BACKSPACE_CHAR (0x08)\n\nstatic const uint32_t UART_BAUD = 9600;\nstatic const uint32_t PERIPHERAL_FREQ = CLOCK_HAL_PCBCLOCK_FREQ;\n\nstatic const uint8_t COMMAND_START_CHAR = '$';\nstatic const uint8_t COMMAND_TERMINATION_CHAR = '\\r';\n\n// =============================================================================\n// Private variables\n// =============================================================================\nstatic bool 
uart_initialized = false;\n\nstatic volatile uint8_t rx_buff[BUFFER_SIZE];\nstatic volatile uint8_t tx_buff[BUFFER_SIZE];\n\nstatic volatile uint16_t rx_buff_first = 0;\nstatic volatile uint16_t rx_buff_last = 0;\nstatic volatile uint16_t tx_buff_first = 0;\nstatic volatile uint16_t tx_buff_last = 0;\n\nstatic volatile uint16_t rx_buff_size = 0;\nstatic volatile uint16_t tx_buff_size = 0;\n\nstatic volatile bool receiving_message = false;\nstatic char rx_message[NMEA_MAX_MESSAGE_LENGTH];\n\nstatic volatile bool debug_echo_enabled = false;\n// =============================================================================\n// Private function declarations\n// =============================================================================\n\n/**\n * @brief Starts transmission of the tx buffer.\n * @param void\n * @return void\n */\nstatic void start_tx(void);\n\n// =============================================================================\n// Public function definitions\n// =============================================================================\n\nvoid jf2_uart_init()\n{\n volatile int16_t wait_cnt = 0;\n\n if (false == uart_initialized)\n {\n //\n // Variables\n //\n rx_buff_first = 0;\n rx_buff_last = 0;\n tx_buff_first = 0;\n tx_buff_last = 0;\n\n rx_buff_size = 0;\n tx_buff_size = 0;\n\n //\n // IO ports\n //\n GPS_TXD_DIR = DIR_OUT;\n GPS_RXD_DIR = DIR_IN;\n\n //\n // UART module\n //\n U1MODE = 0x0000;\n U1STA = 0x0000;\n\n U1BRG = (PERIPHERAL_FREQ / UART_BAUD) / 16 - 1;\n\n U1MODEbits.PDSEL = 0; // 8 bit data, no parity\n U1MODEbits.STSEL = 0; // 1 Stop bit\n U1MODEbits.URXINV = 1; // Idle state low\n\n // Interrupt is generated when any character is transfered to the\n // Transmit Shift Register and the hw transmit buffer is empty.\n U1STAbits.UTXISEL0 = 0;\n U1STAbits.UTXISEL1 = 0;\n IPC3bits.U1TXIP = 2; // Interrupt priority\n IEC0bits.U1TXIE = 1; // TX interrupt enable\n\n // Interrupt is generated each time a data word is transfered from\n // the U1RSR to the receive buffer. 
There may be one or more characters\n // in the receive buffer.\n U1STAbits.URXISEL = 0;\n IPC2bits.U1RXIP = 2; // Interrupt priority\n IEC0bits.U1RXIE = 1; // RX interrupt enable\n\n U1MODEbits.UARTEN = 1;\n U1STAbits.UTXEN = 1;\n\n for (wait_cnt = 0; wait_cnt != PERIPHERAL_FREQ / UART_BAUD; ++wait_cnt)\n {\n ;\n }\n\n debug_echo_enabled = false;\n uart_initialized = true;\n }\n}\n\nvoid jf2_uart_write(uint8_t data)\n{\n if ((0 == tx_buff_size) && (0 == U1STAbits.UTXBF))\n {\n // hw transmit buffer not full but tx buffer is.\n U1TXREG = data;\n }\n else if (tx_buff_size < BUFFER_SIZE)\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = data;\n\n ++tx_buff_size;\n }\n}\n\nvoid jf2_uart_write_string(const char* data)\n{\n const uint8_t* p = (const uint8_t*)data;\n\n // Update the tx buffer.\n while (*p && (tx_buff_size < BUFFER_SIZE))\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = *(p++);\n\n ++tx_buff_size;\n }\n\n start_tx();\n}\n\nvoid jf2_uart_write_array(uint16_t nbr_of_bytes, const uint8_t* data)\n{\n uint16_t i;\n\n // Update the tx buffer.\n for (i = 0; i != nbr_of_bytes; ++i)\n {\n if (tx_buff_size < BUFFER_SIZE)\n {\n if (0 != tx_buff_size)\n {\n ++tx_buff_last;\n\n if (tx_buff_last >= BUFFER_SIZE)\n {\n tx_buff_last = 0;\n }\n }\n\n tx_buff[tx_buff_last] = *(data++);\n\n if (tx_buff_size < BUFFER_SIZE)\n {\n ++tx_buff_size;\n }\n }\n }\n\n start_tx();\n}\n\nbool jf2_uart_is_write_buffer_empty(void)\n{\n return tx_buff_size == 0;\n}\n\nuint8_t jf2_uart_get(uint16_t index)\n{\n uint16_t i;\n uint8_t data;\n\n jf2_uart_disable_rx_interrupt();\n\n i = rx_buff_first + index;\n\n if (i >= BUFFER_SIZE)\n {\n i -= BUFFER_SIZE;\n }\n\n data = rx_buff[i];\n\n jf2_uart_enable_rx_interrupt();\n\n return data;\n}\n\nuint16_t jf_uart_get_receive_buffer_size(void)\n{\n return rx_buff_size;\n}\n\nbool jf2_uart_is_receive_buffer_empty(void)\n{\n return (0 == rx_buff_size);\n}\n\nvoid jf2_uart_clear_receive_buffer(void)\n{\n jf2_uart_disable_rx_interrupt();\n\n rx_buff_size = 0;\n rx_buff_first = 0;\n rx_buff_last = 0;\n\n jf2_uart_enable_rx_interrupt();\n}\n\nvoid jf2_uart_enable_debug_uart_echo(bool enable)\n{\n debug_echo_enabled = enable;\n}\n\n\n// =============================================================================\n// Private function definitions\n// =============================================================================\n\nvoid __attribute__((interrupt, no_auto_psv)) _U1TXInterrupt(void)\n{\n while ((0 == U1STAbits.UTXBF) && (0 != tx_buff_size))\n {\n // TX fifo not full and there are more things to send\n U1TXREG = tx_buff[tx_buff_first];\n\n if (1 != tx_buff_size)\n {\n ++tx_buff_first;\n }\n\n if (tx_buff_first >= BUFFER_SIZE)\n {\n tx_buff_first = 0;\n }\n\n --tx_buff_size;\n }\n\n IFS0bits.U1TXIF = 0;\n}\n\nvoid __attribute__((interrupt, no_auto_psv)) _U1RXInterrupt(void)\n{\n uint8_t received;\n\n jf2_uart_disable_tx_interrupt();\n\n if (U1STAbits.OERR)\n {\n U1STAbits.OERR = 0;\n }\n\n while (U1STAbits.URXDA)\n {\n received = U1RXREG;\n\n if (debug_echo_enabled)\n {\n uart_write(received);\n }\n\n if (!receiving_message &&\n (COMMAND_START_CHAR == received))\n {\n receiving_message = true;\n }\n\n if (receiving_message)\n {\n if (0 != rx_buff_size)\n {\n ++rx_buff_last;\n\n if (rx_buff_last >= BUFFER_SIZE)\n {\n rx_buff_last = 0;\n }\n }\n\n rx_buff[rx_buff_last] = 
received;\n\n ++rx_buff_size;\n \n if (COMMAND_TERMINATION_CHAR == received)\n {\n uint16_t current_index = 0;\n\n for (current_index = 0; current_index != rx_buff_size; ++current_index)\n {\n rx_message[current_index] = (char)jf2_uart_get(current_index);\n }\n\n rx_message[current_index] = NULL;\n\n nmea_queue_append(nmea_queue_get_rx_queue(),\n rx_message,\n rx_buff_size);\n\n receiving_message = false;\n jf2_uart_clear_receive_buffer();\n }\n else if (rx_buff_size > NMEA_MAX_MESSAGE_LENGTH)\n {\n receiving_message = false;\n jf2_uart_clear_receive_buffer();\n }\n }\n }\n\n jf2_uart_enable_tx_interrupt();\n\n IFS0bits.U1RXIF = 0;\n}\n\nstatic void start_tx(void)\n{\n jf2_uart_disable_tx_interrupt();\n jf2_uart_disable_rx_interrupt();\n\n while ((0 != tx_buff_size) && (0 == U1STAbits.UTXBF))\n {\n U1TXREG = tx_buff[tx_buff_first];\n\n if (1 != tx_buff_size)\n {\n ++tx_buff_first;\n }\n\n if (tx_buff_first >= BUFFER_SIZE)\n {\n tx_buff_first = 0;\n }\n\n --tx_buff_size;\n }\n\n jf2_uart_enable_tx_interrupt();\n jf2_uart_enable_rx_interrupt();\n}\n\n" }, { "alpha_fraction": 0.36180421710014343, "alphanum_fraction": 0.4411388337612152, "avg_line_length": 33.72222137451172, "blob_id": "aea83550dd4ed2d22db5eff037269aa05b0bb664", "content_id": "4ef890351384aa52d5bb810d09de8c829dd0c949", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3126, "license_type": "no_license", "max_line_length": 80, "num_lines": 90, "path": "/life_jacket.X/inc/acc/lis2hh12_io.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef LIS2HH12_H\n#define LIS2HH12_H\n\n#ifdef __cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include <stdint.h>\n#include <stdbool.h>\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\ntypedef enum\n{\n LIS2HH12_REG_TEMP_L = 0x0B,\n LIS2HH12_REG_TEMP_H = 0x0C,\n LIS2HH12_REG_WHO_AM_I = 0x0F,\n LIS2HH12_REG_ACT_THS = 0x1E,\n LIS2HH12_REG_ACT_DUR = 0x1F,\n LIS2HH12_REG_CTRL1 = 0x20,\n LIS2HH12_REG_CTRL2 = 0x21,\n LIS2HH12_REG_CTRL3 = 0x22,\n LIS2HH12_REG_CTRL4 = 0x23,\n LIS2HH12_REG_CTRL5 = 0x24,\n LIS2HH12_REG_CTRL6 = 0x25,\n LIS2HH12_REG_CTRL7 = 0x26,\n LIS2HH12_REG_STATUS = 0x27,\n LIS2HH12_REG_OUT_X_L = 0x28,\n LIS2HH12_REG_OUT_X_H = 0x29,\n LIS2HH12_REG_OUT_Y_L = 0x2A,\n LIS2HH12_REG_OUT_Y_H = 0x2B,\n LIS2HH12_REG_OUT_Z_L = 0x2C,\n LIS2HH12_REG_OUT_Z_H = 0x2D,\n LIS2HH12_REG_FIFO_CTRL = 0x2E,\n LIS2HH12_REG_FIFO_SRC = 0x2F,\n LIS2HH12_REG_IG_CFG1 = 0x30,\n LIS2HH12_REG_IG_SRC1 = 0x31,\n LIS2HH12_REG_IG_THS_X1 = 0x32,\n LIS2HH12_REG_IG_THS_Y1 = 0x33,\n LIS2HH12_REG_IG_THS_Z1 = 0x34,\n LIS2HH12_REG_IG_DUR1 = 0x35,\n LIS2HH12_REG_CFG2 = 0x36,\n LIS2HH12_REG_SRC2 = 0x37,\n LIS2HH12_REG_THS2 = 0x38,\n LIS2HH12_REG_DUR2 = 0x39,\n LIS2HH12_REG_XL_REFERENCE = 0x3A,\n LIS2HH12_REG_XH_REFERENCE = 0x3B,\n LIS2HH12_REG_YL_REFERENCE = 0x3C,\n LIS2HH12_REG_YH_REFERENCE = 0x3D,\n LIS2HH12_REG_ZL_REFERENCE = 0x3E,\n LIS2HH12_REG_ZH_REFERENCE = 0x3F\n} lis2hh12_address_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// 
=============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Writes a value to one of the registers.\n @param reg - Address of the register to write to.\n @param value - The value which shall be written to the register.\n*/\nvoid lis2hh12_write_register(lis2hh12_address_t reg, uint8_t value);\n\n/**\n @brief Reads the contents of one register.\n @param reg - Address of the register to read from.\n @return The value read from the specified register.\n*/\nuint8_t lis2hh12_read_register(lis2hh12_address_t reg);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* LIS2HH12_H */\n\n" }, { "alpha_fraction": 0.6052631735801697, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 33, "blob_id": "0c8413b36afaa855ea64b694a9ce9028f3111514", "content_id": "579730351f4ff2ff916f4ab249770a1501426e15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 38, "license_type": "no_license", "max_line_length": 33, "num_lines": 1, "path": "/life_jacket.X/inc/build_number.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#define BUILD_NUMBER_STRING \"387\"\r\r\n\r\n" }, { "alpha_fraction": 0.44720929861068726, "alphanum_fraction": 0.5380232334136963, "avg_line_length": 30.383211135864258, "blob_id": "3c2ef9a2ac3556ee3cedfc4e629f7ecf2e4bb7ec", "content_id": "4e40cf5181288897491bf383db5059dff09269e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 8600, "license_type": "no_license", "max_line_length": 80, "num_lines": 274, "path": "/life_jacket.X/inc/lora/rfm95w_io.h", "repo_name": "WilseErik/lifeJacket", "src_encoding": "UTF-8", "text": "#ifndef LORA_IO_H\n#define\tLORA_IO_H\n\n#ifdef\t__cplusplus\nextern \"C\" {\n#endif\n\n// =============================================================================\n// Include statements\n// =============================================================================\n\n#include \"stdint.h\"\n#include \"stdbool.h\"\n\n// =============================================================================\n// Public type definitions\n// =============================================================================\n\n//\n// Register addresses\n//\ntypedef enum\n{\n RFM95W_REG_FIFO = 0x00,\n RFM95W_REG_OP_MODE = 0x01,\n RFM95W_REG_UNUSED1 = 0x02,\n RFM95W_REG_UNUSED2 = 0x03,\n RFM95W_REG_UNUSED3 = 0x04,\n RFM95W_REG_UNUSED4 = 0x05,\n RFM95W_REG_FRF_MSB = 0x06,\n RFM95W_REG_FRF_MID = 0x07,\n RFM95W_REG_FRF_LSB = 0x08,\n RFM95W_REG_PA_CONFIG = 0x09,\n RFM95W_REG_PA_RAMP = 0x0A,\n RFM95W_REG_OCP = 0x0B,\n RFM95W_REG_LNA = 0x0C,\n RFM95W_REG_FIFO_ADDR_PTR = 0x0D,\n RFM95W_REG_FIFO_TX_BASE_ADDR = 0x0E,\n RFM95W_REG_FIFO_RX_BASE_ADDR = 0x0F,\n RFM95W_REG_FIFO_RX_CURRENT_ADDR = 0x10,\n RFM95W_REG_IRQ_FLAGS_MASK = 0x11,\n RFM95W_REG_IRQ_FLAGS = 0x12,\n RFM95W_REG_RX_NBR_BYTES = 0x13,\n RFM95W_REG_RX_HEADER_CNT_MSB = 0x14,\n RFM95W_REG_RX_HEADER_CNT_LSB = 0x15,\n RFM95W_REG_RX_PACKET_CNT_VALUE_MSB = 0x16,\n RFM95W_REG_RX_PACKET_CNT_VALUE_LSB = 0x17,\n RFM95W_REG_MODEM_STAT = 0x18,\n RFM95W_REG_PKT_SNR_VALUE = 0x19,\n RFM95W_REG_PKT_RSSI_VALUE = 0x1A,\n RFM95W_REG_RSSI_VALUE = 0x1B,\n RFM95W_REG_HOP_CHANNEL = 0x1C,\n 
RFM95W_REG_MODEM_CONFIG1 = 0x1D,\n RFM95W_REG_MODEM_CONFIG2 = 0x1E,\n RFM95W_REG_SYMB_TIMEOUT_LSB = 0x1F,\n RFM95W_REG_PREAMBLE_MSB = 0x20,\n RFM95W_REG_PREAMBLE_LSB = 0x21,\n RFM95W_REG_PAYLOAD_LENGTH = 0x22,\n RFM95W_REG_MAX_PAYLOAD_LENGTH = 0x23,\n RFM95W_REG_HOP_PERIOD = 0x24,\n RFM95W_REG_FIFO_RX_BYTE_ADDR = 0x25,\n RFM95W_REG_MODEM_CONFIG3 = 0x26,\n //\n RFM95W_REG_DIO_MAPPING_1 = 0x40,\n RFM95W_REG_DIO_MAPPING_2 = 0x41,\n RFM95W_REG_VERSION = 0x42,\n //\n RFM95W_REG_TCXO = 0x4B,\n RFM95W_REG_PA_DAC = 0x4D,\n RFM95W_REG_FORMER_TEMP = 0x5B,\n RFM95W_REG_AGC_REF = 0x61,\n RFM95W_REG_AGC_THRESH_1 = 0x62,\n RFM95W_REG_AGC_THRESH_2 = 0x63,\n RFM95W_REG_AGC_THRESH_3 = 0x64\n} rfm95w_address_t;\n\n//\n// Operating mode\n//\ntypedef enum\n{\n RFM95W_OP_MODE_SLEEP = 0x00,\n RFM95W_OP_MODE_STAND_BY = 0x01,\n RFM95W_OP_MODE_FSTX = 0x02,\n RFM95W_OP_MODE_FSRX = 0x04,\n RFM95W_OP_MODE_TX = 0x03,\n RFM95W_OP_MODE_RX_CONT = 0x05,\n RFM95W_OP_MODE_RX_SINGLE = 0x06,\n RFM95W_OP_MODE_CAD = 0x07\n} rfm95w_operating_mode_t;\n\n\n//\n// DIO functions\n//\ntypedef enum\n{\n RFM95W_DIO0_FUNC_RX_DONE = 0x00,\n RFM95W_DIO0_FUNC_TX_DONE = 0x01,\n RFM95W_DIO0_FUNC_CAD_DONE = 0x02\n} rfm95w_dio0_func_t;\n\ntypedef enum\n{\n RFM95W_DIO1_FUNC_RX_TIMEOUT = 0x00,\n RFM95W_DIO1_FUNC_FHSS_CHANGE_CHANNEL = 0x01,\n RFM95W_DIO1_FUNC_CAD_DETECTED = 0x02\n} rfm95w_dio1_func_t;\n\ntypedef enum\n{\n RFM95W_DIO2_FUNC_FHSS_CHANGE_CHANNEL_ALT1 = 0x00,\n RFM95W_DIO2_FUNC_FHSS_CHANGE_CHANNEL_ALT2 = 0x01,\n RFM95W_DIO2_FUNC_FHSS_CHANGE_CHANNEL_ALT3 = 0x02\n} rfm95w_dio2_func_t;\n\ntypedef enum\n{\n RFM95W_DIO3_FUNC_CAD_DONE = 0x00,\n RFM95W_DIO3_FUNC_VALID_HEADER = 0x01,\n RFM95W_DIO3_FUNC_PAYLOAD_CRC_ERROR = 0x02\n} rfm95w_dio3_func_t;\n\ntypedef enum\n{\n RFM95W_DIO4_FUNC_CAD_DETECTED = 0x00,\n RFM95W_DIO4_FUNC_PLL_LOCK_ALT1 = 0x01,\n RFM95W_DIO4_FUNC_PLL_LOCK_ALT2 = 0x02\n} rfm95w_dio4_func_t;\n\ntypedef enum\n{\n RFM95W_DIO5_FUNC_MODE_READY = 0x00,\n RFM95W_DIO5_FUNC_CLK_OUT_ALT1 = 0x01,\n RFM95W_DIO5_FUNC_CLK_OUT_ALT2 = 0x02\n} rfm95w_dio5_func_t;\n\ntypedef enum\n{\n RFM95W_BW_7K8 = 0x00,\n RFM95W_BW_10K4 = 0x01,\n RFM95W_BW_15K6 = 0x02,\n RFM95W_BW_20K8 = 0x03,\n RFM95W_BW_31K25 = 0x04,\n RFM95W_BW_41K7 = 0x05,\n RFM95W_BW_62K5 = 0x06,\n RFM95W_BW_125K = 0x07,\n RFM95W_BW_250K = 0x08,\n RFM95W_BW_500K = 0x09,\n} rfm95w_modem_cfg_bw_t;\n\ntypedef enum\n{\n RFM95W_CODING_RATE_4_5 = 0x01,\n RFM95W_CODING_RATE_4_6 = 0x02,\n RFM95W_CODING_RATE_4_7 = 0x03,\n RFM95W_CODING_RATE_4_8 = 0x04\n} rfm95w_coding_rate_t;\n\ntypedef enum\n{\n RFM95W_EXPLICIT_HEADER_MODE = 0x00,\n RFM95W_IMPLICIT_HEADER_MODE = 0x01\n} rfm95w_implicit_header_mode_on_t;\n\ntypedef enum\n{\n RFM95W_SPREADING_FACTOR_64_CHIPS = 6,\n RFM95W_SPREADING_FACTOR_128_CHIPS = 7,\n RFM95W_SPREADING_FACTOR_256_CHIPS = 8,\n RFM95W_SPREADING_FACTOR_512_CHIPS = 9,\n RFM95W_SPREADING_FACTOR_1024_CHIPS = 10,\n RFM95W_SPREADING_FACTOR_2048_CHIPS = 11,\n RFM95W_SPREADING_FACTOR_4096_CHIPS = 12,\n} rfm95w_spreading_factor_t;\n\ntypedef enum\n{\n RFM95W_TX_NORMAL_MODE = 0x00,\n RFM95W_TX_CONTINUOUS_MODE = 0x01\n} rfm95w_tx_continuous_mode_t;\n\ntypedef enum\n{\n RFM95W_PAYLOAD_CRC_DISABLE = 0x00,\n RFM95W_PAYLOAD_CRC_ENABLE = 0x01\n} rfm95w_payload_crc_on_t;\n\ntypedef enum\n{\n RFM95W_IRQ_FLAG_RX_TIMEOUT_MASK = 0x80,\n RFM95W_IRQ_FLAG_RX_DONE_MASK = 0x40,\n RFM95W_IRQ_FLAG_PAYLOAD_CRC_ERROR_MASK = 0x20,\n RFM95W_IRQ_FLAG_VALID_HEADER_MASK = 0x10,\n RFM95W_IRQ_FLAG_TX_DONE_MASK = 0x08,\n RFM95W_IRQ_FLAG_CAD_DONE_MASK = 0x04,\n RFM95W_IRQ_FLAG_FHSS_CHANGE_CHANNEL_MASK = 0x02,\n 
RFM95W_IRQ_FLAG_CAD_DETECTED_MASK = 0x01\n} rfm95w_irq_flag_t;\n\ntypedef enum\n{\n RFM95W_CHANNEL_FREQUENCY_868_1 = 0x01,\n RFM95W_CHANNEL_FREQUENCY_868_3 = 0x02,\n RFM95W_CHANNEL_FREQUENCY_868_5 = 0x03,\n RFM95W_CHANNEL_FREQUENCY_867_1 = 0x04,\n RFM95W_CHANNEL_FREQUENCY_867_3 = 0x05,\n RFM95W_CHANNEL_FREQUENCY_867_5 = 0x06,\n RFM95W_CHANNEL_FREQUENCY_867_7 = 0x07,\n RFM95W_CHANNEL_FREQUENCY_867_9 = 0x08,\n} rfm95w_channel_frequency_t;\n\n// =============================================================================\n// Global variable declarations\n// =============================================================================\n \n// =============================================================================\n// Global constatants\n// =============================================================================\n\n// =============================================================================\n// Public function declarations\n// =============================================================================\n\n/**\n @brief Writes a value to one register in the RFM95W.\n @param register - Address to write to.\n @param value - Value to be written to the register.\n*/\nvoid rfm95w_io_write(rfm95w_address_t register, uint8_t value);\n\n/**\n @brief Reads the value from one register in the RFM95W.\n @param register - Address to read from.\n @return Value in the read register.\n*/\nuint8_t rfm95w_io_read(rfm95w_address_t register);\n\n/**\n @brief Sets the output function of one of the DIO pins.\n @param dio_number - DIO pin number.\n @param dio_function - Which function should be assigned to the DIO pin.\n*/\nvoid rfm95w_io_set_dio_function(uint8_t dio_number, uint8_t dio_function);\n\n/**\n @brief Sets the operating mode.\n*/\nvoid rfm95w_io_set_operating_mode(rfm95w_operating_mode_t mode);\n\n/**\n * @brief Sets the rx timeout in single RX mode.\n * @param symbols - Timeout length in number of symbols. Must be in [4, 1023]\n */\nvoid rfm95w_io_set_single_rx_timeout(uint16_t symbols);\n\n/**\n * @brief Clears all IRQs.\n */\nvoid rfm95w_io_clear_all_irqs(void);\n\nvoid rfm95w_io_set_bandwidth(rfm95w_modem_cfg_bw_t bandwidth);\n\nvoid rfm95w_io_set_coding_rate(rfm95w_coding_rate_t coding_rate);\n\nvoid rfm95w_io_set_speading_factor(rfm95w_spreading_factor_t spreading_factor);\n\nvoid rfm95w_io_set_frequency(rfm95w_channel_frequency_t frequency);\n\n#ifdef\t__cplusplus\n}\n#endif\n\n#endif\t/* LORA_IO_H */\n\n" } ]
47
safisha/learn-todo
https://github.com/safisha/learn-todo
7fe511362c114b5f668aa96e8584ed3f1ce6eab0
ddd96886911b7fe312a0677407b5698da8863cec
d0a828d0666904122368f0a53991de2ca8925350
refs/heads/master
2023-04-19T13:28:43.703654
2021-05-04T14:16:39
2021-05-04T14:16:39
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6809523701667786, "avg_line_length": 22.22222137451172, "blob_id": "1a1f79753e10687efaaa02a037f1668a1e38755a", "content_id": "67dbf129cfb4fe2595ec6bedd20d405f61830f0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 58, "num_lines": 9, "path": "/todo/models.py", "repo_name": "safisha/learn-todo", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass Todo(models.Model):\n name = models.CharField(max_length=200)\n is_completed = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n" } ]
1
polyg314/smartAPI
https://github.com/polyg314/smartAPI
4c855b6309705e58a58e823e8b3cdbb25fc8e06e
80e3893e88192936bdf1e7a8153a367368b4b22a
0e276b58eb73a740ebd4c948efd97a28038e1825
refs/heads/master
2022-11-26T07:23:11.514331
2020-08-03T17:42:15
2020-08-03T17:42:15
278,686,265
0
0
MIT
2020-07-10T16:48:51
2020-07-06T17:36:57
2020-07-06T17:36:54
null
[ { "alpha_fraction": 0.5075467228889465, "alphanum_fraction": 0.513298511505127, "avg_line_length": 39.51900863647461, "blob_id": "4644d290e9f154db2e33c0f625a67868a5b73756", "content_id": "4707e93dae326e298bf78c73c782d42c7aa8fb8d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24514, "license_type": "permissive", "max_line_length": 142, "num_lines": 605, "path": "/src/web/api/es.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "#pylint: disable=unexpected-keyword-arg\n# non-essential parameters are declared with decorators in es.py\n# https://github.com/elastic/elasticsearch-py/issues/274\n\nimport json\nimport logging\nimport string\nimport sys\nfrom datetime import date, datetime\nfrom shlex import shlex\n\nimport boto3\nfrom elasticsearch import Elasticsearch, RequestError, helpers\n\nfrom .mapping import smart_api_mapping\nfrom .transform import (SWAGGER2_INDEXED_ITEMS, APIMetadata, decode_raw,\n get_api_metadata_by_url, polite_requests)\n\nES_HOST = 'localhost:9200'\nES_INDEX_NAME = 'smartapi_oas3'\nES_DOC_TYPE = 'api'\n\n\ndef ask(prompt, options='YN'):\n '''Prompt Yes or No,return the upper case 'Y' or 'N'.'''\n options = options.upper()\n while 1:\n s = input(prompt+'[%s]' % '|'.join(list(options))).strip().upper()\n if s in options:\n break\n return s\n\n\ndef get_datestamp():\n d = date.today()\n return d.strftime('%Y%m%d')\n\n\ndef get_es(es_host=None):\n es_host = es_host or ES_HOST\n es = Elasticsearch(es_host, timeout=120)\n return es\n\n\ndef split_ids(q):\n '''split input query string into list of ids.\n any of \" \\t\\n\\x0b\\x0c\\r|,+\" as the separator,\n but perserving a phrase if quoted\n (either single or double quoted)\n more detailed rules see:\n http://docs.python.org/2/library/shlex.html#parsing-rules\n\n e.g. split_ids('CDK2 CDK3') --> ['CDK2', 'CDK3']\n split_ids('\"CDK2 CDK3\"\\n CDk4') --> ['CDK2 CDK3', 'CDK4']\n\n '''\n # Python3 strings are already unicode, .encode\n # now returns a bytearray, which cannot be searched with\n # shlex. 
For now, do this terrible thing until we discuss\n if sys.version_info.major == 3:\n lex = shlex(q, posix=True)\n else:\n lex = shlex(q.encode('utf8'), posix=True)\n lex.whitespace = ' \\t\\n\\x0b\\x0c\\r|,+'\n lex.whitespace_split = True\n lex.commenters = ''\n if sys.version_info.major == 3:\n ids = [x.strip() for x in list(lex)]\n else:\n ids = [x.decode('utf8').strip() for x in list(lex)]\n ids = [x for x in ids if x]\n return ids\n\n\ndef create_index(index_name=None, es=None):\n index_name = index_name or ES_INDEX_NAME\n body = {}\n mapping = {\"mappings\": smart_api_mapping}\n body.update(mapping)\n _es = es or get_es()\n print(_es.indices.create(index=index_name, body=body), end=\" \")\n\n\ndef _get_hit_object(hit):\n obj = hit.get('fields', hit.get('_source', {}))\n if '_id' in hit:\n obj['_id'] = hit['_id']\n return obj\n\n\nclass ESQuery():\n def __init__(self, index=None, doc_type=None, es_host=None):\n self._es = get_es(es_host)\n self._index = index or ES_INDEX_NAME\n self._doc_type = doc_type or ES_DOC_TYPE\n\n def exists(self, api_id):\n '''return True/False if the input api_doc has existing metadata\n object in the index.\n '''\n return self._es.exists(index=self._index, doc_type=self._doc_type, id=api_id)\n\n # used in APIHandler [POST]\n def save_api(self, api_doc, save_v2=False, overwrite=False, user_name=None,\n override_owner=False, warn_on_identical=False, dryrun=False):\n '''Adds or updates a compatible-format API document in the SmartAPI index, making it searchable.\n :param save_v2: allow a swagger v2 document pass validation when set to True\n :param overwrite: allow overwriting an existing document if the user_name provided matches the record\n :param user_name: when overwrite is set to to true, and override_owner not, \n to allow overwriting the existing document user_name must match that of the document.\n :param override_owner: allow overwriting regardless of ownership when overwrite is also set to True\n :param warn_on_identical: consider rewriting the existing docuement with an identical one unsuccessful\n used in refresh_all() to exclude APIs with no change from update count\n :param dryrun: only validate the schema and test the overwrite settings, do not actually save.\n '''\n metadata = APIMetadata(api_doc)\n\n # validate document schema\n valid = metadata.validate(raise_error_on_v2=not save_v2)\n if not valid['valid']:\n valid['success'] = False\n valid['error'] = '[Validation] ' + valid['error']\n return valid\n\n # avoid unintended overwrite\n api_id = metadata.encode_api_id()\n doc_exists = self.exists(api_id)\n if doc_exists:\n if not overwrite:\n is_archived = self._es.get(\n index=self._index, doc_type=self._doc_type, id=api_id, _source=[\"_meta\"]).get(\n '_source', {}).get(\n '_meta', {}).get(\n '_archived', False) == 'true'\n if not is_archived:\n return {\"success\": False, \"error\": \"[Conflict] API exists. Not saved.\"}\n elif not override_owner:\n _owner = self._es.get(\n index=self._index, doc_type=self._doc_type, id=api_id, _source=[\"_meta\"]).get(\n '_source', {}).get(\n '_meta', {}).get(\n 'github_username', '')\n if _owner != user_name:\n return {\"success\": False, \"error\": \"[Conflict] User mismatch. 
Not Saved.\"}\n\n # identical document\n _doc = metadata.convert_es()\n if doc_exists:\n _raw_stored = self._es.get(\n index=self._index, doc_type=self._doc_type, id=api_id, _source=[\"~raw\"]).get(\n '_source', {})['~raw']\n if decode_raw(\n _raw_stored, as_string=True) == decode_raw(\n _doc.get('~raw'),\n as_string=True):\n if warn_on_identical:\n return {\"success\": True, '_id': api_id, \"warning\": \"[Conflict] No change in document.\"}\n else:\n return {\"success\": True, '_id': api_id}\n\n # save to es index\n if dryrun:\n return {\"success\": True, '_id': \"[Dryrun] this is a dryrun. API is not saved.\", \"dryrun\": True}\n try:\n self._es.index(index=self._index, doc_type=self._doc_type,\n body=_doc, id=api_id, refresh=True)\n except RequestError as e:\n return {\"success\": False, \"error\": \"[ES]\" + str(e)}\n return {\"success\": True, '_id': api_id}\n\n def _get_api_doc(self, api_doc, with_meta=True):\n doc = decode_raw(api_doc.get('~raw', ''))\n if with_meta:\n doc[\"_meta\"] = api_doc.get('_meta', {})\n doc[\"_id\"] = api_doc[\"_id\"]\n return doc\n\n # used in APIMetaDataHandler [GET]\n def get_api(self, api_name, fields=None, with_meta=True, return_raw=False, size=None, from_=0):\n if api_name == 'all':\n query = {'query': {\"bool\": {\"must_not\": {\n \"term\": {\"_meta._archived\": \"true\"}}}}}\n else:\n query = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\n \"match\": {\n \"_id\": {\n \"query\": api_name\n }\n }\n },\n {\n \"term\": {\n \"_meta.slug\": api_name\n }\n }\n ],\n \"must_not\": {\"term\": {\"_meta._archived\": \"true\"}}\n }\n }\n }\n if fields and fields not in [\"all\", [\"all\"]]:\n query[\"_source\"] = fields\n if size and isinstance(size, int):\n query['size'] = min(size, 100) # set max size to 100 for now.\n if from_ and isinstance(from_, int) and from_ > 0:\n query['from'] = from_\n res = self._es.search(self._index, self._doc_type, query)\n if return_raw == '2':\n return res\n res = [_get_hit_object(d) for d in res['hits']['hits']]\n if not return_raw:\n try:\n res = [self._get_api_doc(x, with_meta=with_meta) for x in res]\n except ValueError as e:\n res = {'success': False, 'error': str(e)}\n if len(res) == 1:\n res = res[0]\n return res\n\n def _do_aggregations(self, _field, agg_name, size):\n query = {\n \"query\": {\n \"bool\": {\n \"must_not\": {\"term\": {\"_meta._archived\": True}}\n }\n },\n \"aggs\": {\n agg_name: {\n \"terms\": {\n \"field\": _field,\n \"size\": size\n }\n }\n }\n }\n res = self._es.search(self._index, self._doc_type, query, size=0)\n res = res[\"aggregations\"]\n return res\n\n def get_api_id_from_slug(self, slug_name):\n query = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\"term\": {\"_meta.slug\": slug_name}},\n {\"ids\": {\"values\": [slug_name]}}\n ]\n }\n }\n }\n try:\n res = self._es.search(\n index=self._index, doc_type=self._doc_type, body=query, size=1, _source=False)\n except:\n return\n if res.get('hits', {}).get('hits', []):\n return res['hits']['hits'][0]['_id']\n\n # used in ValueSuggestionHandler [GET]\n def value_suggestion(self, field, size=100, use_raw=True):\n \"\"\"return a list of existing values for the given field.\"\"\"\n _field = field + \".raw\" if use_raw else field\n agg_name = 'field_values'\n res = self._do_aggregations(_field, agg_name, size)\n return res\n\n def delete_api(self, id):\n \"\"\"delete a saved API metadata, be careful with the deletion.\"\"\"\n if ask(\"Are you sure to delete this API metadata?\") == 'Y':\n print(self._es.delete(index=self._index,\n 
doc_type=self._doc_type, id=id))\n\n # used in APIMetaDataHandler [DELETE]\n def archive_api(self, id, user):\n \"\"\" function to set an _archive flag for an API, making it\n unsearchable from the front end, takes an id identifying the API,\n and a user that must match the APIs creator. \"\"\"\n # does the api exist?\n try:\n _doc = self._es.get(index=self._index,\n doc_type=self._doc_type, id=id)\n except:\n _doc = None\n if not _doc:\n return (404, {\"success\": False, \"error\": \"Could not retrieve API '{}' to delete\".format(id)})\n # is the api unarchived?\n if _doc.get('_source', {}).get('_meta', {}).get('_archived', False):\n return (405, {\"success\": False, \"error\": \"API '{}' already deleted\".format(id)})\n # is this user the owner of this api?\n _user = user.get('login', None)\n if _doc.get('_source', {}).get('_meta', {}).get('github_username', '') != _user:\n return (405, {\"success\": False, \"error\": \"User '{user}' is not the owner of API '{id}'\".format(user=_user, id=id)})\n # do the archive, deregister the slug name\n _doc['_source']['_meta']['_archived'] = 'true'\n _doc['_source']['_meta'].pop('slug', None)\n self._es.index(index=self._index, doc_type=self._doc_type,\n id=id, body=_doc['_source'], refresh=True)\n\n return (200, {\"success\": True, \"message\": \"API '{}' successfully deleted\".format(id)})\n\n # used in GitWebhookHandler [POST] and self.backup_all()\n def fetch_all(self, as_list=False, id_list=[], query={}, ignore_archives=False):\n \"\"\"return a generator of all docs from the ES index.\n return a list instead if as_list is True.\n if query is passed, it returns docs that match the query.\n else if id_list is passed, it returns only docs from the given ids.\n \"\"\"\n if query:\n _query = query\n elif id_list:\n _query = {\"query\": {\"ids\": {\"type\": ES_DOC_TYPE, \"values\": id_list}}}\n elif ignore_archives:\n _query = {\"query\": {\"bool\": {\"must_not\": {\"term\": {\"_meta._archived\": \"true\"}}}}}\n else:\n _query = {\"query\": {\"match_all\": {}}}\n\n scan_res = helpers.scan(client=self._es, query=_query,\n index=self._index, doc_type=self._doc_type)\n\n def _fn(x):\n x['_source'].setdefault('_id', x['_id'])\n return x['_source']\n doc_iter = (_fn(x) for x in scan_res) # return docs only\n if as_list:\n return list(doc_iter)\n else:\n return doc_iter\n\n def backup_all(self, outfile=None, ignore_archives=False, aws_s3_bucket=None):\n \"\"\"back up all docs into a output file.\"\"\"\n # get the real index name in case self._index is an alias\n logging.info(\"Backup started.\")\n alias_d = self._es.indices.get_alias(self._index)\n assert len(alias_d) == 1\n index_name = list(alias_d.keys())[0]\n default_name = \"{}_backup_{}.json\".format(index_name, get_datestamp())\n outfile = outfile or default_name\n doc_li = self.fetch_all(as_list=True, ignore_archives=ignore_archives)\n if aws_s3_bucket:\n location_prompt = 'on S3'\n s3 = boto3.resource('s3')\n s3.Bucket(aws_s3_bucket).put_object(\n Key='db_backup/{}'.format(outfile), Body=json.dumps(doc_li, indent=2))\n else:\n out_f = open(outfile, 'w')\n location_prompt = 'locally'\n out_f = open(outfile, 'w')\n json.dump(doc_li, out_f, indent=2)\n out_f.close()\n logging.info(\"Backed up %s docs in \\\"%s\\\" %s.\", len(doc_li), outfile, location_prompt)\n\n def restore_all(self, backupfile, index_name, overwrite=False):\n \"\"\"restore all docs from the backup file to a new index.\"\"\"\n\n def legacy_backupfile_support_path_str(_doc):\n _paths = []\n if 'paths' in _doc:\n for path in 
_doc['paths']:\n _paths.append({\n \"path\": path,\n \"pathitem\": _doc['paths'][path]\n })\n if _paths:\n _doc['paths'] = _paths\n return _doc\n\n def legacy_backupfile_support_rm_flds(_doc):\n _d = {\"_meta\": _doc['_meta']}\n for key in SWAGGER2_INDEXED_ITEMS:\n if key in _doc:\n _d[key] = _doc[key]\n _d['~raw'] = _doc['~raw']\n return _d\n\n if self._es.indices.exists(index_name):\n if overwrite and ask(\"Warning: index \\\"{}\\\" exists. Do you want to overwrite it?\".format(index_name)) == 'Y':\n self._es.indices.delete(index=index_name)\n else:\n print(\n \"Error: index \\\"{}\\\" exists. Try a different index_name.\".format(index_name))\n return\n\n print(\"Loading docs from \\\"{}\\\"...\".format(backupfile), end=\" \")\n in_f = open(backupfile)\n doc_li = json.load(in_f)\n print(\"Done. [{} Documents]\".format(len(doc_li)))\n\n print(\"Creating index...\", end=\" \")\n create_index(index_name, es=self._es)\n print(\"Done.\")\n\n print(\"Indexing...\", end=\" \")\n swagger_v2_count = 0\n openapi_v3_count = 0\n for _doc in doc_li:\n _id = _doc.pop('_id')\n if \"swagger\" in _doc:\n swagger_v2_count += 1\n _doc = legacy_backupfile_support_rm_flds(_doc)\n _doc = legacy_backupfile_support_path_str(_doc)\n elif \"openapi\" in _doc:\n openapi_v3_count += 1\n else:\n print('\\n\\tWARNING: ', _id, 'No Version.')\n self._es.index(index=index_name,\n doc_type=self._doc_type, body=_doc, id=_id)\n print(swagger_v2_count, ' Swagger Objects and ',\n openapi_v3_count, ' Openapi Objects. ')\n print(\"Done.\")\n\n def _validate_slug_name(self, slug_name):\n ''' Function that determines whether slug_name is a valid slug name '''\n _valid_chars = string.ascii_letters + string.digits + \"-_~\"\n _slug = slug_name.lower()\n\n # reserved for dev node, normal web functioning\n if _slug in ['www', 'dev', 'smart-api']:\n return (False, {\"success\": False, \"error\": \"Slug name '{}' is reserved, please choose another\".format(_slug)})\n\n # length requirements\n if len(_slug) < 4 or len(_slug) > 50:\n return (False, {\"success\": False, \"error\": \"Slug name must be between 4 and 50 chars\"})\n\n # character requirements\n if not all([x in _valid_chars for x in _slug]):\n return (False, {\"success\": False, \"error\": \"Slug name contains invalid characters. Valid characters: '{}'\".format(_valid_chars)})\n\n # does it exist already?\n _query = {\n \"query\": {\n \"bool\": {\n \"should\": [\n {\"term\": {\"_meta.slug.raw\": _slug}},\n {\"ids\": {\"values\": [_slug]}}\n ]\n }\n }\n }\n\n if len(\n self._es.search(\n index=self._index, doc_type=self._doc_type, body=_query, _source=False).get(\n 'hits', {}).get('hits', [])) > 0:\n return (False, {\"success\": False, \"error\": \"Slug name '{}' already exists, please choose another\".format(_slug)})\n\n # good name\n return (True, {})\n\n # used in APIMetaDataHandler [PUT]\n def set_slug_name(self, _id, user, slug_name):\n ''' set the slug name of API _id to slug_name. 
'''\n if not self.exists(_id):\n return (404, {\"success\": False, \"error\": \"Could not retrieve API '{}' to set slug name\".format(_id)})\n\n _user = self._es.get(\n index=self._index, doc_type=self._doc_type, id=_id, _source=[\"_meta\"]).get(\n '_source', {}).get(\n '_meta', {}).get(\n 'github_username', '')\n\n # Make sure this is the correct user\n if user.get('login', None) != _user:\n return (405, {\"success\": False, \"error\": \"User '{}' is not the owner of API '{}'\".format(user.get('login', None), _id)})\n\n # validate the slug name\n _valid, _resp = self._validate_slug_name(slug_name=slug_name)\n\n if not _valid:\n return (405, _resp)\n\n # update the slug name\n self._es.update(index=self._index, doc_type=self._doc_type, id=_id, body={\n \"doc\": {\"_meta\": {\"slug\": slug_name.lower()}}}, refresh=True)\n\n return (200, {\"success\": True, \"{}._meta.slug\".format(_id): slug_name.lower()})\n\n # used in APIMetaDataHandler [DELETE]\n def delete_slug(self, _id, user, slug_name):\n ''' delete the slug of API _id. '''\n if not self.exists(_id):\n return (404, {\"success\": False, \"error\": \"Could not retrieve API '{}' to delete slug name\".format(_id)})\n\n doc = self._es.get(index=self._index,\n doc_type=self._doc_type, id=_id).get('_source', {})\n\n # Make sure this is the correct user\n if user.get('login', None) != doc.get('_meta', {}).get('github_username', ''):\n return (405, {\"success\": False, \"error\": \"User '{}' is not the owner of API '{}'\".format(user.get('login', None), _id)})\n\n # Make sure this is the correct slug name\n if doc.get('_meta', {}).get('slug', '') != slug_name:\n return (405, {\"success\": False, \"error\": \"API '{}' slug name is not '{}'\".format(_id, slug_name)})\n\n # do the delete\n doc['_meta'].pop('slug')\n\n self._es.index(index=self._index, doc_type=self._doc_type,\n body=doc, id=_id, refresh=True)\n\n return (200, {\"success\": True, \"{}\".format(_id): \"slug '{}' deleted\".format(slug_name)})\n\n # used in APIMetaDataHandler [PUT]\n def refresh_one_api(self, _id, user, dryrun=True):\n ''' authenticate the API document of specified _id correspond to the specified user,\n and refresh the API document based on its saved metadata url '''\n\n # _id validation\n try:\n api_doc = self._es.get(\n index=self._index, doc_type=self._doc_type, id=_id)\n except:\n return (404, {\"success\": False, \"error\": \"Could not retrieve API '{}' to refresh\".format(_id)})\n api_doc['_source'].update({'_id': api_doc['_id']})\n\n # ownership validation\n _user = user.get('login', None)\n if api_doc.get('_source', {}).get('_meta', {}).get('github_username', '') != _user:\n return (405, {\"success\": False, \"error\": \"User '{user}' is not the owner of API '{id}'\".format(user=_user, id=_id)})\n\n status = self._refresh_one(\n api_doc=api_doc['_source'], user=_user, dryrun=dryrun)\n if not dryrun:\n self._es.indices.refresh(index=self._index)\n\n if status.get('success', False):\n return (200, status)\n else:\n return (405, status)\n\n def _refresh_one(self, api_doc, user=None, override_owner=False, dryrun=True,\n error_on_identical=False, save_v2=False):\n ''' refresh the given API document object based on its saved metadata url '''\n _id = api_doc['_id']\n _meta = api_doc['_meta']\n\n res = get_api_metadata_by_url(_meta['url'])\n if res and isinstance(res, dict):\n if res.get('success', None) is False:\n res['error'] = '[Request] '+res.get('error', '')\n status = res\n else:\n _meta['timestamp'] = datetime.now().isoformat()\n res['_meta'] = _meta\n 
status = self.save_api(\n res, user_name=user, override_owner=override_owner, overwrite=True,\n dryrun=dryrun, warn_on_identical=error_on_identical, save_v2=True)\n else:\n status = {'success': False, 'error': 'Invalid input data.'}\n\n return status\n\n def refresh_all(\n self, id_list=[],\n dryrun=True, return_status=False, use_etag=True, ignore_archives=True):\n '''refresh saved API documents based on their metadata urls.\n\n :param id_list: the list of API documents to perform the refresh operation\n :param ignore_archives:\n :param dryrun: \n :param use_etag: by default, HTTP ETag is used to speed up version detection\n '''\n updates = 0\n status_li = []\n logging.info(\"Refreshing API metadata:\")\n\n for api_doc in self.fetch_all(id_list=id_list, ignore_archives=ignore_archives):\n\n _id, status = api_doc['_id'], ''\n\n if use_etag:\n _res = polite_requests(api_doc.get('_meta', {}).get('url', ''), head=True)\n if _res.get('success'):\n res = _res.get('response')\n etag_local = api_doc.get('_meta', {}).get('ETag', '')\n etag_server = res.headers.get('ETag', 'N').strip('W/\"')\n if etag_local == etag_server:\n status = \"OK (Via Etag)\"\n\n if not status:\n res = self._refresh_one(\n api_doc, dryrun=dryrun, override_owner=True, error_on_identical=True,\n save_v2=True)\n if res.get('success'):\n if res.get('warning'):\n status = 'OK'\n else:\n status = \"OK Updated\"\n updates += 1\n else:\n status = \"ERR \" + res.get('error')[:60]\n\n status_li.append((_id, status))\n logging.info(\"%s: %s\", _id, status)\n\n logging.info(\"%s: %s APIs refreshed. %s Updates.\", get_datestamp(), len(status_li), updates)\n\n if dryrun:\n logging.warning(\"This is a dryrun! No actual changes have been made.\")\n logging.warning(\"When ready, run it again with \\\"dryrun=False\\\" to apply changes.\")\n\n return status_li\n" }, { "alpha_fraction": 0.4357120394706726, "alphanum_fraction": 0.44325682520866394, "avg_line_length": 35.774566650390625, "blob_id": "f2d47fe7e3a79ab4c9890f1906d5baee9a830039", "content_id": "c0d11f9c5af3326c89feea29460727555cab70af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6362, "license_type": "permissive", "max_line_length": 218, "num_lines": 173, "path": "/src/static/js/smartapi.js", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "function check_user(){\n $.ajax({url: \"/user\", success: function(result){\n var html = \"\";\n var side_html = \"\";\n if (result.login){\n // side_html = '<li><a href=\"/logout?next=' + window.location.pathname + '\">Logout</a></li>';\n if (result.avatar_url){\n $('#navPhoto').attr(\"src\", result.avatar_url);\n html += \"<li><a class='tooltipped' data-tooltip='My Dashboard' id='navPhotoLink' href='/dashboard'><img id='navPhoto' class='circle responsive-img' src='\"+result.avatar_url+\"' alt='user photo'></a></li>\";\n }\n html += \"<li><a class='btn red' href='/logout?next=\" + window.location.pathname + \"'>Logout</a></li>\";\n side_html += \"<li><a class='blue-text' href='/dashboard'>My Dashboard</a></li><li><a class='red-text' href='/logout?next=\" + window.location.pathname + \"'>Logout</a></li>\";\n }else{\n html += \"<li><a class='btn green' href='/oauth'>Login</a></li>\";\n side_html += html;\n }\n // Append new items to navigation\n $(\"#user_link\").append(html).promise().done(function(){\n $('.tooltipped').tooltip();\n $(\".dropdown-button\").dropdown();\n });\n $(\"#side_user_link\").append(side_html);\n }});\n};\n\n\n/* for reg_form.html 
*/\nfunction save_api(form, overwrite, savev2){\n $(\"#submit_mask\").modal(\"open\");\n var data = $(form).serialize();\n if (overwrite){\n data += \"&overwrite=true\";\n }\n if (savev2){\n data += \"&save_v2=true\";\n }\n $.ajax({\n url: \"/api\",\n type: \"post\",\n data: data,\n success: function(response) {\n $(\"#submit_mask\").modal(\"close\");\n if (response.success){\n var msg = 'API metadata saved!';\n if (response.dryrun){\n swal({\n imageUrl: '/static/img/api-dryrun.svg',\n imageWidth: 300,\n imageAlt: 'Dry Run',\n title: 'Awesome! But...',\n html: \"Because this is a dryrun your data has not been saved. If you want to register your API, uncheck 'dry run' and try again.\",\n })\n\n }else if (response.success){\n swal({\n imageUrl: '/static/img/api-sucess.svg',\n imageWidth: 300,\n title: 'Good Job!',\n html: \"You can see your API documentation <b><a href='/registry?q=\"+response._id+\"'>HERE</a></b>\",\n })\n }\n\n }\n else{\n if ( response.hasOwnProperty(\"swagger_v2\") && response.swagger_v2 ){\n // -----------\n swal({\n title: \"Swagger V2 Detected\",\n text: \"Only OpenAPI V3 will experience full functionality. Continue saving anyway?\",\n imageUrl: '/static/img/api-v2.svg',\n imageWidth: 300,\n showCancelButton: true,\n confirmButtonText: 'Yes, save it!',\n footer: '<a target=\"_blank\" href=\"https://github.com/SmartAPI/smartAPI-Specification/blob/OpenAPI.next/versions/3.0.0.md\">Learn More about OpenAPI V3 Specification</a>'\n })\n .then((willSave) => {\n\n if (willSave.value) {\n save_api(form, true, true);\n\n } else if (!willSave.value) {\n swal({\n title: \"Uh-oh!\",\n text: \"Your Data Has Not Been Saved\",\n imageUrl: '/static/img/api-fail.svg',\n imageWidth: 300,\n confirmButtonText: 'OK',\n })\n }\n });\n // ----------\n }\n if (response.error.indexOf(\"API exists\") != -1){\n swal({\n title: 'This API already exists, do you want to overwrite it?',\n text: \"You won't be able to revert this!\",\n imageUrl: '/static/img/api-overwrite.svg',\n imageWidth: 300,\n showCancelButton: true,\n confirmButtonColor: '#3085d6',\n cancelButtonColor: '#d33',\n confirmButtonText: 'Yes, overwrite it!'\n }).then((willSave)=>{\n if( willSave.value){\n save_api(form, true);\n\n }else if (!willSave.value) {\n swal({\n title: \"Uh-oh!\",\n text: \"Your Data Has Not Been Saved\",\n imageUrl: '/static/img/api-fail.svg',\n imageWidth: 300,\n confirmButtonText: 'OK',\n })\n }\n });\n }\n else if( !response.hasOwnProperty(\"swagger_v2\") ){\n swal({\n imageUrl: '/static/img/api-error.svg',\n imageWidth: 300,\n title: response.valid==false?'ValidationError':'Error',\n text:response.error\n });\n }\n }\n }\n });\n};\n\nfunction initialize_form() {\n // initialize API registration form\n $('#apireg_form').validate({\n rules: {\n url: {\n required: true\n }\n },\n submitHandler: function(form){\n save_api(form);\n }\n });\n\n $('#api_search_form').validate({\n rules:{\n query: {\n required: true\n }\n }\n });\n};\n\n\n$(function(){\n // Initialize collapse button\n $('.button-collapse').sideNav({\n menuWidth: 240, // Default is 240\n edge: 'left', // Choose the horizontal origin\n closeOnClick: true // Closes side-nav on <a> clicks, useful for Angular/Meteor\n }\n );\n // Initialize dropdown button\n $(\".dropdown-button\").dropdown();\n // Initialize modal\n $('.modal').modal({dismissible: true});\n // Check user status\n check_user();\n\n // for reg_form.html\n initialize_form();\n });\n\n // Particles\n" }, { "alpha_fraction": 0.5303789973258972, "alphanum_fraction": 0.5351914763450623, 
"avg_line_length": 36.923954010009766, "blob_id": "aeeeee47a21f7b293c08ab2b8fe9458c8c5ee0ef", "content_id": "fbbd73425b03a9cedf1c0a80e2a8d5a2af106c59", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9974, "license_type": "permissive", "max_line_length": 159, "num_lines": 263, "path": "/src/web/api/handlers.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "import datetime\nimport hashlib\nimport hmac\nimport json\nimport re\nfrom collections import OrderedDict\n\nimport tornado.escape\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport yaml\n\nfrom biothings.web.api.es.handlers import \\\n QueryHandler as BioThingsESQueryHandler\nfrom biothings.web.api.es.handlers.base_handler import BaseESRequestHandler\n\nfrom .es import ESQuery\nfrom .transform import APIMetadata, get_api_metadata_by_url\n\nfrom utils.slack_notification import send_slack_msg\n\nclass BaseHandler(BaseESRequestHandler):\n\n def get_current_user(self):\n user_json = self.get_secure_cookie(\"user\")\n if not user_json:\n return None\n return json.loads(user_json.decode('utf-8'))\n\n\nclass ValidateHandler(BaseHandler):\n def _validate(self, data):\n if data and isinstance(data, dict):\n metadata = APIMetadata(data)\n valid = metadata.validate()\n return self.return_json(valid)\n else:\n return self.return_json({\"valid\": False, \"error\": \"The input url does not contain valid API metadata.\"})\n\n def get(self):\n url = self.get_argument('url', None)\n if url:\n data = get_api_metadata_by_url(url)\n if data.get('success', None) is False:\n self.return_json(data)\n else:\n self._validate(data)\n else:\n self.return_json(\n {\"valid\": False, \"error\": \"Need to provide an input url first.\"})\n\n def post(self):\n if self.request.body:\n try:\n data = tornado.escape.json_decode(self.request.body)\n except ValueError:\n try:\n data = yaml.load(self.request.body, Loader=yaml.SafeLoader)\n except (yaml.scanner.ScannerError,\n yaml.parser.ParserError):\n return self.return_json({\"valid\": False, \"error\": \"The input request body does not contain valid API metadata.\"})\n self._validate(data)\n else:\n self.return_json(\n {\"valid\": False, \"error\": \"Need to provide data in the request body first.\"})\n\n\nclass APIHandler(BaseHandler):\n def post(self):\n # check if a logged in user\n user = self.get_current_user()\n if not user:\n res = {'success': False,\n 'error': 'Authenticate first with your github account.'}\n self.set_status(401)\n self.return_json(res)\n else:\n # save an API metadata\n overwrite = self.get_argument('overwrite', '').lower()\n overwrite = overwrite in ['1', 'true']\n dryrun = self.get_argument('dryrun', '').lower()\n dryrun = dryrun in ['on', '1', 'true']\n save_v2 = self.get_argument('save_v2', '').lower()\n save_v2 = save_v2 in ['1', 'true']\n url = self.get_argument('url', None)\n if url:\n data = get_api_metadata_by_url(url)\n # try:\n # data = tornado.escape.json_decode(data)\n # except ValueError:\n # data = None\n if data and isinstance(data, dict):\n if data.get('success', None) is False:\n self.return_json(data)\n else:\n _meta = {\n \"github_username\": user['login'],\n 'url': url,\n 'timestamp': datetime.datetime.now().isoformat()\n }\n data['_meta'] = _meta\n esq = ESQuery()\n res = esq.save_api(\n data, overwrite=overwrite, dryrun=dryrun, user_name=user['login'], save_v2=save_v2)\n self.return_json(res)\n ## send notification to slack \n 
if(res[\"success\"] == True):\n send_slack_msg(data, res, user['login']) \n\n else:\n self.return_json(\n {'success': False, 'error': 'Invalid input data.'})\n\n else:\n self.return_json(\n {'success': False, 'error': 'missing required parameter.'})\n\n\nclass APIMetaDataHandler(BaseHandler):\n esq = ESQuery()\n\n def get(self, api_name):\n '''return API metadata for a matched api_name,\n if api_name is \"all\", return a list of all APIs\n '''\n fields = self.get_argument('fields', None)\n out_format = self.get_argument('format', 'json').lower()\n return_raw = self.get_argument('raw', False)\n with_meta = self.get_argument('meta', False)\n size = self.get_argument('size', None)\n from_ = self.get_argument('from', 0)\n try:\n # size capped to 100 for now by get_api method below.\n size = int(size)\n except (TypeError, ValueError):\n size = None\n try:\n from_ = int(from_)\n except (TypeError, ValueError):\n from_ = 0\n if fields:\n fields = fields.split(',')\n res = self.esq.get_api(api_name, fields=fields, with_meta=with_meta,\n return_raw=return_raw, size=size, from_=from_)\n if out_format == 'yaml':\n self.return_yaml(res)\n else:\n self.return_json(res)\n\n def put(self, api_name):\n ''' refresh API metadata for a matched api_name,\n checks to see if current user matches the creating user.'''\n slug_name = self.get_argument('slug', None)\n dryrun = self.get_argument('dryrun', '').lower()\n dryrun = dryrun in ['on', '1', 'true']\n # must be logged in first\n user = self.get_current_user()\n if not user:\n res = {'success': False,\n 'error': 'Authenticate first with your github account.'}\n self.set_status(401)\n else:\n if slug_name:\n (status, res) = self.esq.set_slug_name(\n _id=api_name, user=user, slug_name=slug_name)\n else:\n (status, res) = self.esq.refresh_one_api(\n _id=api_name, user=user, dryrun=dryrun)\n self.set_status(status)\n self.return_json(res)\n\n def delete(self, api_name):\n '''delete API metadata for a matched api_name,\n checks to see if current user matches the creating user.'''\n # must be logged in first\n user = self.get_current_user()\n slug_name = self.get_argument('slug', '').lower()\n if not user:\n res = {'success': False,\n 'error': 'Authenticate first with your github account.'}\n self.set_status(401)\n elif slug_name:\n (status, res) = self.esq.delete_slug(\n _id=api_name, user=user, slug_name=slug_name)\n self.set_status(status)\n else:\n (status, res) = self.esq.archive_api(api_name, user)\n self.set_status(status)\n self.return_json(res)\n\n\nclass ValueSuggestionHandler(BaseHandler):\n esq = ESQuery()\n\n def get(self):\n field = self.get_argument('field', None)\n try:\n size = int(self.get_argument('size', 100))\n except:\n size = 100\n if field:\n res = self.esq.value_suggestion(field, size=size)\n else:\n res = {'error': 'missing required \"field\" parameter'}\n self.return_json(res)\n\n\nclass GitWebhookHandler(BaseHandler):\n esq = ESQuery()\n\n def post(self):\n # do message authentication\n digest_obj = hmac.new(key=self.web_settings.API_KEY.encode(\n ), msg=self.request.body, digestmod=hashlib.sha1)\n if not hmac.compare_digest('sha1=' + digest_obj.hexdigest(), self.request.headers.get('X-Hub-Signature', '')):\n self.set_status(405)\n self.return_json(\n {'success': False, 'error': 'Invalid authentication'})\n return\n data = tornado.escape.json_decode(self.request.body)\n # get repository owner name\n repo_owner = data.get('repository', {}).get(\n 'owner', {}).get('name', None)\n if not repo_owner:\n self.set_status(405)\n 
self.return_json(\n {'success': False, 'error': 'Cannot get repository owner'})\n return\n # get repo name\n repo_name = data.get('repository', {}).get('name', None)\n if not repo_name:\n self.set_status(405)\n self.return_json(\n {'success': False, 'error': 'Cannot get repository name'})\n return\n # find all modified files in all commits\n modified_files = set()\n for commit_obj in data.get('commits', []):\n for fi in commit_obj.get('added', []):\n modified_files.add(fi)\n for fi in commit_obj.get('modified', []):\n modified_files.add(fi)\n # build query\n _query = {\"query\": {\"bool\": {\"should\": [\n {\"regexp\": {\"_meta.url.raw\": {\"value\": '.*{owner}/{repo}/.*/{fi}'.format(owner=re.escape(repo_owner), repo=re.escape(repo_name), fi=re.escape(fi)),\n \"max_determinized_states\": 200000}}} for fi in modified_files]}}}\n # get list of ids that need to be refreshed\n ids_refresh = [x['_id'] for x in self.esq.fetch_all(query=_query)]\n # if there are any ids to refresh, do it\n if ids_refresh:\n self.esq.refresh_all(id_list=ids_refresh, dryrun=False)\n\n\nAPP_LIST = [\n (r'/?', APIHandler),\n (r'/query/?', BioThingsESQueryHandler),\n (r'/validate/?', ValidateHandler),\n (r'/metadata/(.+)/?', APIMetaDataHandler),\n (r'/suggestion/?', ValueSuggestionHandler),\n (r'/webhook_payload/?', GitWebhookHandler),\n]\n" }, { "alpha_fraction": 0.5418070554733276, "alphanum_fraction": 0.5638591051101685, "avg_line_length": 29.80188751220703, "blob_id": "552b11595103ca074a1d5d3518bd2049a7e4aa7c", "content_id": "a91baf8e916f81fbb71bb59a498d446648f07658", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3265, "license_type": "permissive", "max_line_length": 93, "num_lines": 106, "path": "/src/tests/remote.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "'''\n SmartAPI Read-Only Test\n'''\n\nimport os\n\nfrom nose.core import runmodule\nfrom nose.tools import eq_\n\nfrom biothings.tests import BiothingsTestCase\n\n\nclass SmartAPIRemoteTest(BiothingsTestCase):\n\n ''' Test against server specified in environment variable SMARTAPI_HOST\n or SmartAPI production server if SMARTAPI_HOST is not specified '''\n\n __test__ = True # explicitly set this to be a test class\n\n host = os.getenv(\"SMARTAPI_HOST\", \"https://smart-api.info\")\n api = '/api'\n\n # Query Functionalities\n\n def test_101_regular(self):\n ''' Query regular string '''\n self.query(q='translator')\n\n def test_102_named_field(self):\n ''' Query named field '''\n self.query(q='tags.name:translator')\n\n def test_103_match_all(self):\n ''' Query all documents '''\n self.query(q='__all__')\n\n def test_104_random_score(self):\n ''' Query random documents '''\n res = self.query(q='__any__')\n query_1_id = res['hits'][0]['_id']\n res = self.query(q='__any__')\n query_2_id = res['hits'][0]['_id']\n assert query_1_id != query_2_id\n\n def test_105_filters(self):\n ''' Query with multiple filters '''\n flt = '{\"tags.name.raw\":[\"annotation\",\"variant\"],\"info.contact.name.raw\":[\"Chunlei Wu\"]}'\n res = self.query(q='__all__', filters=flt)\n eq_(len(res['hits']), 3)\n\n # Error Handling\n\n def test_201_special_char(self):\n ''' Handle special characters '''\n self.query(q='translat\\xef\\xbf\\xbd\\xef\\xbf\\xbd', expect_hits=False)\n self.request(\"query?q=http://example.com/\", expect_status=400)\n\n def test_202_missing_term(self):\n ''' Handle empty request '''\n self.request(\"query\", expect_status=400)\n\n def test_203_bad_size(self):\n ''' Handle 
type error '''\n self.request(\"query?q=__all__&size=my\", expect_status=400)\n\n def test_204_bad_index(self):\n ''' Handle index out of bound '''\n res_0 = self.request('query?q=__all__&fields=_id&size=5').json()\n ids_0 = {hit['_id'] for hit in res_0['hits']}\n res_1 = self.request('query?q=__all__&fields=_id&size=5&from=5').json()\n ids_1 = [hit['_id'] for hit in res_1['hits']]\n for _id in ids_1:\n if _id in ids_0:\n assert False\n\n # Result Formatting\n\n def test_301_sources(self):\n ''' Return specified fields '''\n res = self.query(q='__all__', fields='_id,info')\n for hit in res['hits']:\n assert '_id' in hit and 'info' in hit\n assert '_meta' not in hit\n\n def test_302_size(self):\n ''' Return specified size '''\n res = self.query(q='__all__', size=6)\n eq_(len(res['hits']), 6)\n\n def test_303_raw(self):\n ''' Return raw ES result '''\n res = self.query(q='__all__', raw=1)\n assert '_shards' in res\n\n def test_304_query(self):\n ''' Return query sent to ES '''\n res = self.request('query?q=__all__&rawquery=1').json()\n assert \"query\" in res\n assert \"bool\" in res[\"query\"]\n\n\nif __name__ == '__main__':\n print()\n print('SmartAPI Remote Test:', SmartAPIRemoteTest.host)\n print('-'*70 + '\\n')\n runmodule(argv=['', '--logging-level=INFO', '-v'])\n" }, { "alpha_fraction": 0.3244648277759552, "alphanum_fraction": 0.32721713185310364, "avg_line_length": 29.14285659790039, "blob_id": "8283f08bbe4d37878643ac89f30b75dd8aed94b4", "content_id": "248cee88725b11ebc64f88a5e0daf34f196396af", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3270, "license_type": "permissive", "max_line_length": 88, "num_lines": 105, "path": "/src/web/api/query_builder.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "from biothings.web.api.es.query_builder import ESQueryBuilder as BiothingsESQueryBuilder\r\n\r\nimport json\r\n\r\n\r\nclass SmartAPIQueryBuilder(BiothingsESQueryBuilder):\r\n\r\n def get_query_filters(self):\r\n\r\n _filter = None\r\n\r\n if self.options.filters:\r\n try:\r\n terms_filter = json.loads(self.options.filters)\r\n if terms_filter:\r\n if len(terms_filter) == 1:\r\n _filter = {\"terms\": terms_filter}\r\n else:\r\n _filter = [{\"terms\": {f[0]: f[1]}}\r\n for f in terms_filter.items()]\r\n except:\r\n pass\r\n\r\n return _filter\r\n\r\n def get_missing_filters(self):\r\n\r\n no_archived = {\r\n \"term\": {\r\n \"_meta._archived\": \"true\"\r\n }\r\n }\r\n return no_archived\r\n\r\n def _extra_query_types(self, q):\r\n\r\n dis_max_query = {\r\n \"query\": {\r\n \"dis_max\": {\r\n \"queries\": [\r\n {\r\n \"term\": {\r\n \"info.title\": {\r\n \"value\": q,\r\n \"boost\": 2.0\r\n }\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"server.url\": {\r\n \"value\": q,\r\n \"boost\": 1.1\r\n }\r\n }\r\n },\r\n {\r\n \"term\": {\r\n \"_id\": q,\r\n }\r\n },\r\n {\r\n \"query_string\": {\r\n \"query\": q\r\n }\r\n },\r\n {\r\n \"query_string\": {\r\n \"query\": q + \"*\",\r\n \"boost\": 0.8\r\n }\r\n },\r\n ]\r\n }\r\n }\r\n }\r\n return dis_max_query\r\n\r\n def _query_GET_query(self, q):\r\n # override as an alternative solution\r\n # see change below\r\n if self._is_user_query():\r\n _query = self._user_query(q)\r\n elif self._is_match_all(q):\r\n _query = self._match_all(q)\r\n elif self._is_random_query(q) and self.allow_random_query:\r\n _query = self._random_query(q)\r\n else:\r\n _query = self._extra_query_types(q)\r\n\r\n if not _query:\r\n _query = self._default_query(q)\r\n\r\n # previously 
assigned to _query directly\r\n _query['query'] = self.add_query_filters(_query)\r\n\r\n _query = self.queries.raw_query(_query)\r\n\r\n _ret = self._return_query_kwargs({'body': _query})\r\n\r\n if self.options.fetch_all:\r\n _ret['body'].pop('sort', None)\r\n _ret['body'].pop('size', None)\r\n _ret.update(self.scroll_options)\r\n return _ret\r\n" }, { "alpha_fraction": 0.5779533386230469, "alphanum_fraction": 0.5877051949501038, "avg_line_length": 38.32524108886719, "blob_id": "17aa96be96b4a4a6ca2eca792134ebc5ba6816f5", "content_id": "6c052777e34d75c7fd631522c40d0691602429cf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8101, "license_type": "permissive", "max_line_length": 190, "num_lines": 206, "path": "/src/web/api/transform.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "'''\nValidate and transform SmartAPI/OpenAPI v3 metadata for indexing\n'''\nimport base64\nimport copy\nimport gzip\nimport json\nimport sys\nfrom collections import OrderedDict\n\nimport jsonschema\nimport requests\nimport yaml\n\nif sys.version_info.major >= 3 and sys.version_info.minor >= 6:\n from hashlib import blake2b\nelse:\n from pyblake2 import blake2b # pylint: disable=import-error\n\n\n# Official oas3 json schema for validation is still in-development.\n# For now we us this updated oas3-schema from swagger-editor\nOAS3_SCHEMA_URL = 'https://raw.githubusercontent.com/swagger-api/swagger-editor/v3.7.1/src/plugins/json-schema-validator/oas3-schema.yaml'\nSWAGGER2_SCHEMA_URL = 'https://raw.githubusercontent.com/swagger-api/swagger-editor/v3.6.1/src/plugins/validate-json-schema/structural-validation/swagger2-schema.js'\n\n# List of root keys that should be indexed in version 2 schema\nSWAGGER2_INDEXED_ITEMS = ['info', 'tags', 'swagger', 'host', 'basePath']\n\n# list of major versions of schema that we support\nSUPPORTED_SCHEMA_VERSIONS = ['SWAGGER2', 'OAS3']\n\n# This is a separate schema for SmartAPI extensions only\nSMARTAPI_SCHEMA_URL = 'https://raw.githubusercontent.com/SmartAPI/smartAPI-Specification/OpenAPI.next/schemas/smartapi_schema.json'\nMETADATA_KEY_ORDER = ['openapi', 'info', 'servers',\n 'externalDocs', 'tags', 'security', 'paths', 'components']\n\n\ndef encode_raw(metadata):\n '''return encoded and compressed metadata'''\n _raw = json.dumps(metadata).encode('utf-8')\n _raw = base64.urlsafe_b64encode(gzip.compress(_raw)).decode('utf-8')\n return _raw\n\n\ndef decode_raw(raw, sorted=True, as_string=False):\n '''if sorted is True, the keys in the decoded dictionary will follow\n a defined order.\n '''\n _raw = gzip.decompress(base64.urlsafe_b64decode(raw)).decode('utf-8')\n if as_string:\n return _raw\n d = json.loads(_raw)\n if sorted:\n d2 = OrderedDict()\n for key in METADATA_KEY_ORDER:\n if key in d:\n d2[key] = d[key]\n for key in d:\n if key not in d2:\n d2[key] = d[key]\n return d2\n else:\n return d\n\n\ndef polite_requests(url, head=False):\n try:\n if head:\n res = requests.head(url, timeout=5)\n else:\n res = requests.get(url, timeout=5)\n except requests.exceptions.Timeout:\n return {\"success\": False, \"error\": \"URL request is timeout.\"}\n except requests.exceptions.ConnectionError:\n return {\"success\": False, \"error\": \"URL request had a connection error.\"}\n except requests.exceptions.RequestException:\n return {\"success\": False, \"error\": \"Failed to make the request to this URL.\"}\n if res.status_code != 200:\n return {\"success\": False, \"error\": \"URL request returned 
{}.\".format(res.status_code)}\n return {\"success\": True, \"response\": res}\n\n\ndef get_api_metadata_by_url(url, as_string=False):\n\n _res = polite_requests(url)\n if _res.get('success'):\n res = _res.get('response')\n if as_string:\n return res.text\n else:\n try:\n metadata = res.json()\n # except json.JSONDecodeError: # for py>=3.5\n except ValueError: # for py<3.5\n try:\n metadata = yaml.load(res.text, Loader=yaml.SafeLoader)\n except (yaml.scanner.ScannerError,\n yaml.parser.ParserError):\n return {\"success\": False,\n \"error\": \"Not a valid JSON or YAML format.\"}\n return metadata\n else:\n return _res\n\n\nclass APIMetadata:\n def __init__(self, metadata):\n # get the major version of the schema type\n if metadata.get('openapi', False):\n self.schema_version = 'OAS' + metadata['openapi'].split('.')[0]\n elif metadata.get('swagger', False):\n self.schema_version = 'SWAGGER' + metadata['swagger'].split('.')[0]\n else:\n self.schema_version = None\n # set the correct schema validation url\n if self.schema_version == 'SWAGGER2':\n self.schema_url = SWAGGER2_SCHEMA_URL\n else:\n self.schema_url = OAS3_SCHEMA_URL\n self.get_schema()\n self.metadata = metadata\n self._meta = self.metadata.pop('_meta', {})\n try:\n self._meta['ETag'] = requests.get(\n self._meta['url']).headers.get(\n 'ETag', 'I').strip('W/\"')\n except BaseException:\n pass\n if self.schema_version == 'SWAGGER2':\n self._meta['swagger_v2'] = True\n\n def get_schema(self):\n schema = requests.get(self.schema_url).text\n if schema.startswith(\"export default \"):\n schema = schema[len(\"export default \"):]\n try:\n self.oas_schema = json.loads(schema)\n except:\n self.oas_schema = yaml.load(schema, Loader=yaml.SafeLoader)\n self.smartapi_schema = requests.get(SMARTAPI_SCHEMA_URL).json()\n\n def encode_api_id(self):\n x = self._meta.get('url', None)\n if not x:\n raise ValueError(\"Missing required _meta.url field.\")\n return blake2b(x.encode('utf8'), digest_size=16).hexdigest()\n\n def validate(self, raise_error_on_v2=True):\n '''Validate API metadata against JSON Schema.'''\n if not self.schema_version or self.schema_version not in SUPPORTED_SCHEMA_VERSIONS:\n return {\"valid\": False, \"error\": \"Unsupported schema version '{}'. 
Supported versions are: '{}'.\".format(\n self.schema_version, SUPPORTED_SCHEMA_VERSIONS)}\n if raise_error_on_v2 and self.schema_version == 'SWAGGER2':\n return {\"valid\": False, \"error\": \"Found a v2 swagger schema, please convert to v3 for fullest functionality or click the checkbox to proceed with v2 anyway.\", \"swagger_v2\": True}\n try:\n jsonschema.validate(self.metadata, self.oas_schema)\n except jsonschema.ValidationError as e:\n err_msg = \"'{}': {}\".format('.'.join([str(x) for x in e.path]), e.message)\n return {\"valid\": False, \"error\": \"[{}] \".format(self.schema_version) + err_msg}\n except Exception as e:\n return {\"valid\": False, \"error\": \"Unexpected Validation Error: {} - {}\".format(type(e).__name__, e)}\n\n if self.schema_version == 'OAS3':\n try:\n jsonschema.validate(self.metadata, self.smartapi_schema)\n except jsonschema.ValidationError as e:\n err_msg = \"'{}': {}\".format('.'.join([str(x) for x in e.path]), e.message)\n return {\"valid\": False, \"error\": \"[SmartAPI] \" + err_msg}\n _warning = \"\"\n _ret = {\"valid\": True}\n else:\n _warning = \"No SmartAPI extensions supported on Swagger/OpenAPI version 2\"\n _ret = {\"valid\": True, \"_warning\": _warning, \"swagger_v2\": True}\n return _ret\n\n def _encode_raw(self):\n '''return encoded and compressed metadata'''\n _raw = json.dumps(self.metadata).encode('utf-8')\n _raw = base64.urlsafe_b64encode(gzip.compress(_raw)).decode('utf-8')\n return _raw\n\n def convert_es(self):\n '''convert API metadata for ES indexing.'''\n\n if self.schema_version == 'OAS3':\n _d = copy.copy(self.metadata)\n _d['_meta'] = self._meta\n # convert paths to a list of each path item\n _paths = []\n for path in _d.get('paths', []):\n _paths.append({\n \"path\": path,\n \"pathitem\": _d['paths'][path]\n })\n if _paths:\n _d['paths'] = _paths\n else:\n # swagger 2 or other, only index limited fields\n _d = {\"_meta\": self._meta}\n for key in SWAGGER2_INDEXED_ITEMS:\n if key in self.metadata:\n _d[key] = self.metadata[key]\n\n # include compressed binary raw metadata as \"~raw\"\n _d[\"~raw\"] = encode_raw(self.metadata)\n return _d\n" }, { "alpha_fraction": 0.6937277317047119, "alphanum_fraction": 0.7046263217926025, "avg_line_length": 46.82978820800781, "blob_id": "bb59323369f4f128b03c80ea622f77ef1cf39e55", "content_id": "61dce565efd93eeed4a0aafce18cf2bc4d76627c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4496, "license_type": "permissive", "max_line_length": 451, "num_lines": 94, "path": "/README.md", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "# SmartAPI\nIntelligent APIs for a more connected web.\n\nA BD2K/Network-of-BioThings project.\n\nSmartAPI allows API publishers to annotate their services and input/output parameters in a structured and identifiable manner, based on a standard JSON-LD format for biomedical APIs and services. By indexing and visualizing these descriptions as Linked Data in a Elasticsearch back-end, researchers can seamlessly identify the services that consume or return desired parameters, and automatically compose services in workflows that yield new insights.\n\nPresentation: http://bit.ly/smartAPIslides \nContact: [email protected] \n\n\n# How to run a dev API server locally\n1. 
Install Elasticsearch (version 6.x) at localhost:9200 (follow [this instruction](https://www.elastic.co/downloads/elasticsearch)) or install with docker (follow [this instruction](https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html))\n2. Clone this repo\n ```\n git clone https://github.com/SmartAPI/smartAPI.git\n ````\n3. Install system packages (on Ubuntu, for example)\n ```\n sudo apt install libcurl4-openssl-dev libssl-dev aws-cli\n ```\n4. Install python dependencies after navigating to root smartAPI directory\n ```\n cd smartAPI\n pip install -r requirements.txt\n ```\n5. Navigate to SmartAPI source files and create a *config_key.py* under *src*\n ```\n cd src\n touch config_key.py\n ```\n6. Update *config_key.py* with\n ```\n COOKIE_SECRET = '<Any Random String>'\n GITHUB_CLIENT_ID = '<your Github application Client ID>'\n GITHUB_CLIENT_SECRET = '<your Github application Client Secret>'\n SLACK_WEBHOOKS = [ \n\t {\n\t\t \"tag\": '<string>' or '<list of strings>', # (optional)\n\t\t \"webhook\": '<insert webhook URL>', \n\t\t \"template\": '<slack markdown string with variables included as {variable_name}>' # (optional)\n\t }\n ] # (optional) \n ```\n For Github incorporation, follow [this instruction](https://developer.github.com/apps/building-oauth-apps/creating-an-oauth-app/) to create your Github Client ID and Secret. \n Enter any _Application name_, `http://localhost:8000/` for _Homepage \n URL_ and `http://localhost:8000/oauth` for _Authorization callback URL_.\n \n For SLACK_WEBHOOKS (optional), the list may not be included if one does not want Slack notifications pushed every time a new API is added to the smartAPI registry. \n \n Alternatively, if one wants slack notifications sent to more than one channel, one may list more than one dict in the ```SLACK_WEBHOOKS``` list.\n \n Follow [this instruction](https://slack.com/help/articles/115005265063-Incoming-Webhooks-for-Slack) to create Slack webhooks and obtain webhook URLs. \n \n If one would like a Slack notification pushed only if the newly registered API contains a specific tag or tags, one should include the ```tag``` key, which should have the value of the specific tag(s) (case sensitive).\n \n For example:\n ```\n \"tags\": ['translator','biothings'] # will send every time an API is registered with a 'translator' and/or 'biothings' tag \n ```\n or\n ```\n \"tags\": 'translator' # will send every time an API is registered with a 'translator' tag \n ```\n Finally, to supply your own template instead of using the default Slack Markdown template, please supply the template as a string, with optional variables to be included in a ```{variable}``` format. For example: \n ```\n \"template\": \"A new API has been registered on SmartAPI.info:\\n\\n*Title:* {api_title}\\n*Description:* {api_description}\"\n ```\n The variables that can be supplied include: \n ```\n api_title # title of registered API \n api_description # listed describtion of API\n registry_url # url that the API is listed in the SmartAPI registry\n docs_url # url for the API's documentation on SmartAPI.info\n github_user # the github username of the individual that registered the API\n ```\n \n7. Create index in Python (version 3.x) shell:\n ```\n from web.api import es \n es.create_index()\n ```\n Or import some API data from a saved dump file. Contact us for the dump file. 
\n And replace the name of the file in the command with the backup file name.\n ```\n from web.api import es\n esq = es.ESQuery()\n esq.restore_all(\"smartapi_oas3_backup_20200706.json\", es.ES_INDEX_NAME)\n ```\n8. Run dev server\n ```\n python index.py --debug\n ```\nYou should now able to access API dev server at http://localhost:8000\n" }, { "alpha_fraction": 0.6614487171173096, "alphanum_fraction": 0.6651319861412048, "avg_line_length": 32.25510025024414, "blob_id": "d2e71a411f78a41e28cbd2ecd0444f4738f7af58", "content_id": "6cc45bddf9ba0c621ec460a8c1e05ce8792909c0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3258, "license_type": "permissive", "max_line_length": 96, "num_lines": 98, "path": "/src/utils/slack_notification.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "import json\nimport requests\nimport re\nfrom tornado.httpclient import HTTPRequest, AsyncHTTPClient\n\ntry:\n\tfrom config import SLACK_WEBHOOKS\nexcept ImportError: \n\tSLACK_WEBHOOKS = []\n\ndef get_tags(data):\n\t\"\"\"Generate array of all the tags listed in the newly registered API\"\"\"\n\ttags = []\n\tif('tags' in data):\n\t\tfor item in data['tags']:\n\t\t\ttags.append(item['name'])\n\treturn tags\n\ndef change_link_markdown(description):\n\t\"\"\"Change markdown styling of links to match fit Slack Markdown styling\n\t\n\tDescription text links formatted as [link name](URL), we want <URL|link name>\n\t\"\"\"\n\treturn re.sub('\\[(?P<label>[^\\[\\]]+)\\]\\((?P<url>[^()]+)\\)', '<\\g<url>|\\g<label>>', description)\n\ndef generate_slack_params(data, res, github_user, webhook_dict):\n\t\"\"\"Generate parameters that will be used in slack post request. \n\t\n\tIn this case, markdown is used to generate formatting that \n\twill show in Slack message\n\t\"\"\"\n\tapi_title = data[\"info\"][\"title\"]\n\t# limit API description to 120 characters\n\tapi_description = ((data[\"info\"][\"description\"][:120] + '...') \n\t\t\t\t\t\tif len(data[\"info\"][\"description\"]) > 120 \n\t\t\t\t\t\telse data[\"info\"][\"description\"])\n\tapi_description = change_link_markdown(api_description)\n\tapi_id = res[\"_id\"]\n\tregistry_url = f\"http://smart-api.info/registry?q={api_id}\" \n\tdocs_url = f\"http://smart-api.info/ui/{api_id}\"\n\tapi_data = {\n\t\t\"api_title\": api_title, \n\t\t\"api_description\": api_description,\n\t\t\"registry_url\": registry_url,\n\t\t\"docs_url\": docs_url,\n\t\t\"github_user\": github_user\n\t}\n\t# default markdown\n\tdefault_block_markdown_template = (\"A new API has been registered on SmartAPI.info:\\n\\n\"\n\t\t\t\t\t\t\"*Title:* {api_title}\\n\"\n\t\t\t\t\t\t\"*Description:* {api_description}\\n\"\n\t\t\t\t\t\t\"*Registered By:* <https://github.com/{github_user}|{github_user}>\\n\\n\"\n\t\t\t\t\t\t\"<{registry_url}|View on SmartAPI Registry> - <{docs_url}|View API Documentation>\")\n\t# get template - use default if one not provided\n\tblock_markdown_tpl = webhook_dict.get(\"template\", default_block_markdown_template)\n\t# fill template with variable values\n\tblock_markdown = block_markdown_tpl.format(**api_data)\n\tparams = {\n \"attachments\": [{\n \t\"color\": \"#b0e3f9\",\n \"blocks\": [{\n\t\t\t\t\"type\": \"section\",\n\t\t\t\t\"text\": {\n\t\t\t\t\t\"type\": \"mrkdwn\",\n\t\t\t\t\t\"text\": block_markdown\n\t\t\t\t}\n\t\t\t}]\n }]\n }\n\treturn params\n\ndef send_slack_msg(data, res, github_user):\n\t\"\"\"Make requests to slack to post information about newly registered API. 
\n\t\n\tNotifications will be sent to every \n\tchannel/webhook that is not tag specific, or will be sent to\n\tslack if the registered API contains a tag that is also specific\n\ta channel/webhook. \n\t\"\"\"\n\theaders = {'content-type': 'application/json'}\n\tdata_tags = get_tags(data)\n\thttp_client = AsyncHTTPClient()\n\tfor x in SLACK_WEBHOOKS:\n\t\tsend_request = False\n\t\tif('tags' in x):\n\t\t\tif(isinstance(x['tags'], str)):\n\t\t\t\tif(x['tags'] in data_tags):\n\t\t\t\t\tsend_request = True\n\t\t\telif(isinstance(x['tags'], list)):\n\t\t\t\tif(bool(set(x['tags']) & set(data_tags))):\n\t\t\t\t\tsend_request = True\n\t\telse:\n\t\t\tsend_request = True\n\t\tif(send_request):\n\t\t\tparams = generate_slack_params(data, res, github_user, x)\n\t\t\treq = HTTPRequest(url=x['webhook'], method='POST', body=json.dumps(params), headers=headers)\n\t\t\thttp_client = AsyncHTTPClient()\n\t\t\thttp_client.fetch(req)" }, { "alpha_fraction": 0.6734058856964111, "alphanum_fraction": 0.6741834878921509, "avg_line_length": 27.9069766998291, "blob_id": "d8b067b251deeb740ba6a986ff76084e126c251f", "content_id": "fd65d40a643fc699961b56f4ede6eebd630d12d7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1286, "license_type": "permissive", "max_line_length": 69, "num_lines": 43, "path": "/src/index.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "\"\"\"\r\n SmartAPI Web Server Entry Point\r\n\r\n > python index.py\r\n\r\n\"\"\"\r\n\r\nimport datetime\r\nimport logging\r\nimport os.path\r\n\r\nfrom tornado.ioloop import IOLoop\r\nfrom utils.api_monitor import update_uptime_status\r\nfrom utils.versioning import backup_and_refresh\r\n\r\nimport config\r\nfrom biothings.web.index_base import main\r\nfrom biothings.web.settings import BiothingESWebSettings\r\n\r\nWEB_SETTINGS = BiothingESWebSettings(config=config)\r\n\r\n\r\ndef schedule_daily_job():\r\n tomorrow = datetime.datetime.today() + datetime.timedelta(days=1)\r\n midnight = datetime.datetime.combine(tomorrow, datetime.time.min)\r\n IOLoop.current().add_timeout(midnight.timestamp(), daily_job)\r\n\r\ndef daily_job():\r\n def sync_job():\r\n backup_and_refresh()\r\n update_uptime_status()\r\n IOLoop.current().run_in_executor(None, sync_job)\r\n schedule_daily_job()\r\n\r\nif __name__ == '__main__':\r\n (SRC_PATH, _) = os.path.split(os.path.abspath(__file__))\r\n STATIC_PATH = os.path.join(SRC_PATH, 'static')\r\n # IOLoop.current().add_callback(daily_job) # run upon start\r\n schedule_daily_job()\r\n main(WEB_SETTINGS.generate_app_list(),\r\n app_settings={\"cookie_secret\": config.COOKIE_SECRET},\r\n debug_settings={\"static_path\": STATIC_PATH},\r\n use_curl=True)\r\n" }, { "alpha_fraction": 0.6397415399551392, "alphanum_fraction": 0.6429725289344788, "avg_line_length": 23.79166603088379, "blob_id": "96a084021e7dbea0d1f1ea36af06355ebfb8f317", "content_id": "9ccf80ff3b86b963294dcb18acc1f8489e4e34ed", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "permissive", "max_line_length": 87, "num_lines": 24, "path": "/src/tests/local.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "'''\r\n SmartAPI Read-Only Test\r\n'''\r\n\r\n\r\nfrom nose.core import run\r\nfrom biothings.tests import TornadoTestServerMixin\r\n\r\nfrom remote import SmartAPIRemoteTest\r\n\r\n\r\nclass SmartAPILocalTest(TornadoTestServerMixin, SmartAPIRemoteTest):\r\n '''\r\n Self contained 
test class\r\n Starts a Tornado server and perform tests against this server.\r\n '''\r\n __test__ = True # explicitly set this to be a test class\r\n\r\n\r\nif __name__ == '__main__':\r\n print()\r\n print('SmartAPI Local Test')\r\n print('-'*70 + '\\n')\r\n run(argv=['', '--logging-level=INFO', '-v'], defaultTest='local.SmartAPILocalTest')\r\n" }, { "alpha_fraction": 0.4842931926250458, "alphanum_fraction": 0.4864746928215027, "avg_line_length": 35.967742919921875, "blob_id": "ca92bcf74800317c0f68458ac865fdce96cea6ba", "content_id": "195d1a7b25a8a07087e13972da9fa24b6bfc18d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2292, "license_type": "permissive", "max_line_length": 79, "num_lines": 62, "path": "/src/config.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "''' SmartAPI Configuration '''\n# pylint: disable=wildcard-import, unused-wildcard-import, unused-import\n\nfrom biothings.web.settings.default import *\nfrom config_key import *\nfrom web.api.handlers import APP_LIST as api_app_list\nfrom web.api.query_builder import SmartAPIQueryBuilder\nfrom web.handlers import APP_LIST as web_app_list\n\n# *****************************************************************************\n# Credentials\n# *****************************************************************************\n# Define in config_key.py:\n# COOKIE_SECRET = '<Any Random String>'\n# GITHUB_CLIENT_ID = '<your Github application Client ID>'\n# GITHUB_CLIENT_SECRET = '<your Github application Client Secret>'\n\n# *****************************************************************************\n# Elasticsearch\n# *****************************************************************************\nES_INDEX = 'smartapi_oas3'\nES_DOC_TYPE = 'api'\n\n# *****************************************************************************\n# Tornado URL Patterns\n# *****************************************************************************\n\n\ndef add_apps(prefix='', app_list=None):\n '''\n Add prefix to each url handler specified in app_list.\n add_apps('test', [('/', testhandler,\n ('/test2', test2handler)])\n will return:\n [('/test/', testhandler,\n ('/test/test2', test2handler)])\n '''\n if not app_list:\n app_list = []\n if prefix:\n return [('/'+prefix+url, handler) for url, handler in app_list]\n else:\n return app_list\n\n\nAPP_LIST = []\nAPP_LIST += add_apps('', web_app_list)\nAPP_LIST += add_apps('api', api_app_list)\n\n# *****************************************************************************\n# Biothings Query Settings\n# *****************************************************************************\n# Subclass of biothings.web.api.es.query_builder.ESQueryBuilder\nES_QUERY_BUILDER = SmartAPIQueryBuilder\n# Keyword Argument Control\nQUERY_GET_ESQB_KWARGS.update({'filters': {'default': None, 'type': str}})\n# Header Strings\nACCESS_CONTROL_ALLOW_METHODS = 'GET,POST,PUT,DELETE,OPTIONS'\n# Only affect API endpoints\nDISABLE_CACHING = True\n# Heavy operation. 
Enable on small db only.\nALLOW_RANDOM_QUERY = True\n" }, { "alpha_fraction": 0.592901885509491, "alphanum_fraction": 0.5970772504806519, "avg_line_length": 17.79166603088379, "blob_id": "c490a6a0bbd9f4ce62ad09e463cff483f627e6bb", "content_id": "3cedf213f4763bec9fad35331c061abedf853d11", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "permissive", "max_line_length": 48, "num_lines": 24, "path": "/src/utils/versioning.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "'''\r\nBackup es index to S3 and refresh\r\n'''\r\n\r\nimport logging\r\n\r\nfrom tornado.ioloop import IOLoop\r\n\r\nfrom web.api.es import ESQuery\r\n\r\n\r\ndef backup_and_refresh():\r\n '''\r\n Run periodically in the main event loop\r\n '''\r\n esq = ESQuery()\r\n try:\r\n esq.backup_all(aws_s3_bucket='smartapi')\r\n except:\r\n logging.exception(\"Backup failed.\")\r\n try:\r\n esq.refresh_all(dryrun=False)\r\n except:\r\n logging.exception(\"Refresh failed.\")\r\n\r\n\r\n" }, { "alpha_fraction": 0.5894524455070496, "alphanum_fraction": 0.5913869738578796, "avg_line_length": 33.661808013916016, "blob_id": "428de1183a58975c0b6f9c1c7dcb41362e51045e", "content_id": "c30856c8e03530bb1fb80b56a2a0d120fd9edcf3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11889, "license_type": "permissive", "max_line_length": 79, "num_lines": 343, "path": "/src/web/handlers.py", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "import json\nimport logging\nimport os\nimport sys\n\nimport tornado.gen\nimport tornado.httpclient\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.web\nimport torngithub\nfrom jinja2 import Environment, FileSystemLoader\nfrom tornado.httputil import url_concat\nfrom torngithub import json_decode, json_encode\n\nfrom web.api.es import ESQuery\nfrom biothings.web.api.helper import BaseHandler as BioThingsBaseHandler\n\nlog = logging.getLogger(\"smartapi\")\n\n\nsrc_path = os.path.split(os.path.split(os.path.abspath(__file__))[0])[0]\nif src_path not in sys.path:\n sys.path.append(src_path)\n\nTEMPLATE_PATH = os.path.join(src_path, 'templates/')\nAVAILABLE_TAGS = ['translator', 'nihdatacommons']\n\n# your Github application Callback\nGITHUB_CALLBACK_PATH = \"/oauth\"\nGITHUB_SCOPE = \"\"\n\n# Docs: http://docs.python-guide.org/en/latest/scenarios/web/\n# Load template file templates/site.html\ntemplateLoader = FileSystemLoader(searchpath=TEMPLATE_PATH)\ntemplateEnv = Environment(loader=templateLoader, cache_size=0)\n\n\nclass BaseHandler(BioThingsBaseHandler):\n def get_current_user(self):\n user_json = self.get_secure_cookie(\"user\")\n if not user_json:\n return None\n return json_decode(user_json)\n\n\nclass MainHandler(BaseHandler):\n def get(self):\n slug = self.request.host.split(\".\")[0]\n # print(\"Host: {} - Slug: {}\".format(self.request.host, slug))\n if slug.lower() not in ['www', 'dev', 'smart-api']:\n # try to get a registered subdomain/tag\n esq = ESQuery()\n api_id = esq.get_api_id_from_slug(slug)\n if api_id:\n swaggerUI_file = \"smartapi-ui.html\"\n swagger_template = templateEnv.get_template(swaggerUI_file)\n swagger_output = swagger_template.render(apiID=api_id)\n self.write(swagger_output)\n return\n index_file = \"index.html\"\n index_template = templateEnv.get_template(index_file)\n index_output = index_template.render()\n self.write(index_output)\n\n\nclass 
UserInfoHandler(BaseHandler):\n def get(self):\n current_user = self.get_current_user() or {}\n for key in ['access_token', 'id']:\n if key in current_user:\n del current_user[key]\n self.return_json(current_user)\n\n\nclass LoginHandler(BaseHandler):\n def get(self):\n xsrf = self.xsrf_token\n login_file = \"login.html\"\n login_template = templateEnv.get_template(login_file)\n path = GITHUB_CALLBACK_PATH\n _next = self.get_argument(\"next\", \"/\")\n if _next != \"/\":\n path += \"?next={}\".format(_next)\n login_output = login_template.render(path=path, xsrf=xsrf)\n self.write(login_output)\n\n\nclass AddAPIHandler(BaseHandler, torngithub.GithubMixin):\n # def get(self):\n # self.write(\"Hello, world\")\n # self.write(html_output)\n # template.render(list=movie_list,\n # title=\"Here is my favorite movie list\")\n def get(self):\n if self.current_user:\n # self.write('Login User: ' + self.current_user[\"name\"]\n # + '<br> Email: ' + self.current_user[\"email\"]\n # + ' <a href=\"/logout\">Logout</a>')\n template_file = \"reg_form.html\"\n reg_template = templateEnv.get_template(template_file)\n reg_output = reg_template.render()\n self.write(reg_output)\n else:\n path = '/login'\n _next = self.get_argument(\"next\", self.request.path)\n if _next != \"/\":\n path += \"?next={}\".format(_next)\n self.redirect(path)\n\n\nclass LogoutHandler(BaseHandler):\n def get(self):\n self.clear_cookie(\"user\")\n self.redirect(self.get_argument(\"next\", \"/\"))\n\n\nclass GithubLoginHandler(BaseHandler, torngithub.GithubMixin):\n\n @tornado.gen.coroutine\n def get(self):\n # we can append next to the redirect uri, so the user gets the\n # correct URL on login\n redirect_uri = url_concat(self.request.protocol +\n \"://\" + self.request.host +\n GITHUB_CALLBACK_PATH,\n {\"next\": self.get_argument('next', '/')})\n\n # if we have a code, we have been authorized so we can log in\n if self.get_argument(\"code\", False):\n user = yield self.get_authenticated_user(\n redirect_uri=redirect_uri,\n client_id=self.web_settings.GITHUB_CLIENT_ID,\n client_secret=self.web_settings.GITHUB_CLIENT_SECRET,\n code=self.get_argument(\"code\")\n )\n if user:\n log.info('logged in user from github: ' + str(user))\n self.set_secure_cookie(\"user\", json_encode(user))\n else:\n self.clear_cookie(\"user\")\n self.redirect(self.get_argument(\"next\", \"/\"))\n return\n\n # otherwise we need to request an authorization code\n yield self.authorize_redirect(\n redirect_uri=redirect_uri,\n client_id=self.web_settings.GITHUB_CLIENT_ID,\n extra_params={\"scope\": GITHUB_SCOPE, \"foo\": 1}\n )\n\n\nclass RegistryHandler(BaseHandler):\n def get(self, tag=None):\n template_file = \"smart-registry.html\"\n # template_file = \"/smartapi/dist/index.html\"\n reg_template = templateEnv.get_template(template_file)\n # author filter parsing\n if self.get_argument('owners', False):\n owners = [x.strip().lower()\n for x in self.get_argument('owners').split(',')]\n else:\n owners = []\n # special url tag\n if tag:\n if tag.lower() in AVAILABLE_TAGS:\n # print(\"tags: {}\".format([tag.lower()]))\n reg_output = reg_template.render(Context=json.dumps(\n {\"Tags\": [tag.lower()],\n \"Special\": True,\n \"Owners\": owners}))\n else:\n raise tornado.web.HTTPError(404)\n # typical query filter tags\n elif self.get_argument('tags', False) or \\\n self.get_argument('owners', False):\n tags = [x.strip().lower()\n for x in self.get_argument('tags', \"\").split(',')]\n # print(\"tags: {}\".format(tags))\n reg_output = reg_template.render(\n 
Context=json.dumps(\n {\"Tags\": tags,\n \"Special\": False,\n \"Owners\": owners}))\n else:\n reg_output = reg_template.render(Context=json.dumps({}))\n self.write(reg_output)\n\n\nclass DocumentationHandler(BaseHandler):\n def get(self):\n doc_file = \"documentation.html\"\n documentation_template = templateEnv.get_template(doc_file)\n documentation_output = documentation_template.render()\n self.write(documentation_output)\n\n\nclass DashboardHandler(BaseHandler):\n def get(self):\n doc_file = \"dashboard.html\"\n dashboard_template = templateEnv.get_template(doc_file)\n dashboard_output = dashboard_template.render()\n self.write(dashboard_output)\n\n\nclass SwaggerUIHandler(BaseHandler):\n def get(self, yourApiID=None):\n if not yourApiID:\n if self.get_argument('url', False):\n api_id = self.get_argument('url').split('/')[-1]\n self.redirect('/ui/{}'.format(api_id), permanent=True)\n else:\n raise tornado.web.HTTPError(404)\n return\n swaggerUI_file = \"smartapi-ui.html\"\n swagger_template = templateEnv.get_template(swaggerUI_file)\n swagger_output = swagger_template.render(apiID=yourApiID)\n self.write(swagger_output)\n\n\nclass BrandingHandler(BaseHandler):\n def get(self):\n doc_file = \"brand.html\"\n branding_template = templateEnv.get_template(doc_file)\n branding_output = branding_template.render()\n self.write(branding_output)\n\n\nclass GuideHandler(BaseHandler):\n def get(self):\n doc_file = \"guide.html\"\n guide_template = templateEnv.get_template(doc_file)\n guide_output = guide_template.render()\n self.write(guide_output)\n\n\nclass APIEditorHandler(BaseHandler):\n def get(self, yourApiID=None):\n if not yourApiID:\n if self.get_argument('url', False):\n api_id = self.get_argument('url').split('/')[-1]\n self.redirect('/editor/{}'.format(api_id), permanent=True)\n else:\n # raise tornado.web.HTTPError(404)\n swaggerEditor_file = \"editor.html\"\n swagger_template = templateEnv.get_template(swaggerEditor_file)\n swagger_output = swagger_template.render(\n Context=json.dumps({\"Id\": '', \"Data\": False}))\n self.write(swagger_output)\n return\n swaggerEditor_file = \"editor.html\"\n swagger_template = templateEnv.get_template(swaggerEditor_file)\n swagger_output = swagger_template.render(\n Context=json.dumps({\"Id\": yourApiID, \"Data\": True}))\n self.write(swagger_output)\n\n\nclass AboutHandler(BaseHandler):\n def get(self):\n doc_file = \"about.html\"\n about_template = templateEnv.get_template(doc_file)\n about_output = about_template.render()\n self.write(about_output)\n\nclass PrivacyHandler(BaseHandler):\n def get(self):\n doc_file = \"privacy.html\"\n privacy_template = templateEnv.get_template(doc_file)\n privacy_output = privacy_template.render()\n self.write(privacy_output)\n\nclass FAQHandler(BaseHandler):\n def get(self):\n doc_file = \"faq.html\"\n faq_template = templateEnv.get_template(doc_file)\n faq_output = faq_template.render()\n self.write(faq_output)\n\nclass TemplateHandler(BaseHandler):\n\n def initialize(self, filename, status_code=200, env=None):\n\n self.filename = filename\n self.status = status_code\n\n def get(self, **kwargs):\n\n template = self.env.get_template(self.filename)\n output = template.render(Context=json.dumps(kwargs))\n\n self.set_status(self.status)\n self.write(output)\n\nclass PortalHandler(BaseHandler):\n\n def get(self, portal=None):\n portals = ['translator']\n\n template_file = \"portal.html\"\n reg_template = templateEnv.get_template(template_file)\n\n if portal in portals:\n reg_output = 
reg_template.render(Context=json.dumps(\n {\"portal\": portal}))\n else:\n raise tornado.web.HTTPError(404)\n self.write(reg_output)\n\nclass MetaKGHandler(BaseHandler):\n\n def get(self):\n print('META KG')\n doc_file = \"metakg.html\"\n template = templateEnv.get_template(doc_file)\n output = template.render(Context=json.dumps(\n {\"portal\": 'translator'}))\n self.write(output)\n\nAPP_LIST = [\n (r\"/\", MainHandler),\n (r\"/user/?\", UserInfoHandler),\n (r\"/add_api/?\", AddAPIHandler),\n (r\"/login/?\", LoginHandler),\n (GITHUB_CALLBACK_PATH, GithubLoginHandler),\n (r\"/logout/?\", LogoutHandler),\n (r\"/registry/(.+)/?\", RegistryHandler),\n (r\"/registry/?\", RegistryHandler),\n (r\"/documentation/?\", DocumentationHandler),\n (r\"/dashboard/?\", DashboardHandler),\n (r\"/ui/(.+)/?\", SwaggerUIHandler),\n (r\"/ui/?\", SwaggerUIHandler),\n (r\"/branding/?\", BrandingHandler),\n (r\"/guide/?\", GuideHandler),\n (r\"/editor/(.+)/?\", APIEditorHandler),\n (r\"/editor/?\", APIEditorHandler),\n (r\"/about/?\", AboutHandler),\n (r\"/faq/?\", FAQHandler),\n (r\"/privacy/?\", PrivacyHandler),\n # (r\"/portal/?\", TemplateHandler, {\"filename\": \"registry.html\"}),\n (r\"/portal/translator/metakg/?\", MetaKGHandler),\n (r\"/portal/([^/]+)/?\", PortalHandler),\n\n]\n" }, { "alpha_fraction": 0.6185566782951355, "alphanum_fraction": 0.7285223603248596, "avg_line_length": 17.1875, "blob_id": "b7a1ec13186e89ba48a7d84602e3638f24a59376", "content_id": "7bf9737837ae1d7bf3309644ceecea279fa6a36c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 291, "license_type": "permissive", "max_line_length": 84, "num_lines": 16, "path": "/requirements.txt", "repo_name": "polyg314/smartAPI", "src_encoding": "UTF-8", "text": "biothings[web_extra]==0.5.0\n\n# document validation\njsonschema>=2.6.0\n\n# web handling\nelasticsearch-dsl==6.4.0\ntorngithub==0.2.0 \nJinja2>=2.9.6\npycurl==7.43.0.3\n\n# gitdb version specified because gitdb.utils.compat not available in newest version\ngitdb==0.6.4\n\n# s3 backup\nboto3>=1.9.86,<2.0\n" } ]
14
bobycv06fpm/another-gui-frontend
https://github.com/bobycv06fpm/another-gui-frontend
32db1bd532ac2a91e95f0905fae17cdcdbc95d0b
73cae344244917e5c33d4d00515d1f52bdb06a5b
d4a19fb75d6ebc6b75cce1c36a1f1f876cea1eaa
refs/heads/main
2023-07-10T15:23:01.035542
2021-08-04T20:55:48
2021-08-04T20:55:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8127490282058716, "alphanum_fraction": 0.8167330622673035, "avg_line_length": 124.5, "blob_id": "4f8308f0b15ec158a525468426791c9622afc0c4", "content_id": "9e9e125d56c41510633b5821d408055172581fa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 251, "license_type": "no_license", "max_line_length": 227, "num_lines": 2, "path": "/README.md", "repo_name": "bobycv06fpm/another-gui-frontend", "src_encoding": "UTF-8", "text": "# another-gui-frontend\nFirst experience with PySimpleGUI- acts as a frontend intended for running inference on various GPT-2 and GPT-Neo models. Layout was my main focus here, the theming and colour scheme are unfinished and likely badly implemented.\n" }, { "alpha_fraction": 0.5592092275619507, "alphanum_fraction": 0.5794947743415833, "avg_line_length": 44.35029983520508, "blob_id": "2cfea16cfc8bad3723961c7f66c3d38c0cb7a327", "content_id": "5fcadbc0a53153e084c566cc87cf3f460bc43108", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15485, "license_type": "no_license", "max_line_length": 346, "num_lines": 334, "path": "/another gui frontend.py", "repo_name": "bobycv06fpm/another-gui-frontend", "src_encoding": "UTF-8", "text": "from PySimpleGUI.PySimpleGUI import popup_yes_no, theme_background_color, theme_button_color, theme_element_text_color, theme_slider_color, theme_text_color, theme_text_element_background_color\r\nfrom aitextgen import aitextgen\r\nfrom transformers import GPT2Tokenizer\r\nimport torch\r\n\r\nimport PySimpleGUI as sg\r\n\r\n# from the AID2 repo, stuff to tidy up outputs\r\ndef cut_trailing_sentence(text):\r\n last_punc = max(text.rfind(\".\"), text.rfind(\"!\"), text.rfind(\"?\"))\r\n if last_punc <= 0:\r\n last_punc = len(text) - 1\r\n\r\n et_token = text.find(\"<\")\r\n if et_token > 0:\r\n last_punc = min(last_punc, et_token - 1)\r\n\r\n act_token = text.find(\">\")\r\n if act_token > 0:\r\n last_punc = min(last_punc, act_token - 1)\r\n\r\n text = text[:last_punc+1]\r\n\r\n text = cut_trailing_quotes(text)\r\n return text\r\n\r\ndef cut_trailing_quotes(text):\r\n num_quotes = text.count('\"')\r\n if num_quotes % 2 == 0:\r\n return text\r\n else:\r\n final_ind = text.rfind('\"')\r\n return text[:final_ind]\r\n\r\ndef standardize_punctuation(text):\r\n text = text.replace(\"’\", \"'\")\r\n text = text.replace(\"`\", \"'\")\r\n text = text.replace(\"“\", '\"')\r\n text = text.replace(\"”\", '\"')\r\n return text\r\n\r\n# my code from here\r\n\r\nai = None\r\nmodel_name = None\r\ntokenizer = None\r\n\r\n# default generation settings\r\noutlen = 128\r\nouttemp = 0.9\r\noutreppen = 1.0\r\noutlenpen = 1.0\r\nouttopk = 50\r\nouttopp = 1.0\r\nback_memory = ''\r\naid_style = False # work in progress, currently just prefixes \"> You\" to the beginning of user input\r\n\r\ndef context_window():\r\n print(\"Opening 'context' window...\")\r\n # context window\r\n cont_layout = [[sg.Text('Back Memory (Inserted before output history)')],\r\n [sg.Multiline(default_text = back_memory, size = (120,20), key = '-MEMORY-', background_color = 'black', text_color = 'white', enable_events = True)],\r\n [sg.Button('Save', key = '-SAVECTX-'), sg.Text('Memory token count', key = '-CTXTOKENS-')]\r\n ]\r\n ctx_window = sg.Window(\"Extra stuff\", cont_layout, finalize = True)\r\n ctx_window['-MEMORY-'].set_cursor(cursor_color = 'white')\r\n return ctx_window\r\n \r\ndef get_context_token_count(contextbox): #used in the context window, 
could likely be merged into get_token_count() with a little effort\r\n print(\"Updating context token counts...\")\r\n contexttokens = len(tokenizer.encode(contextbox))\r\n print(str(contexttokens) + \" tokens in context box\")\r\n return contexttokens\r\n\r\ndef get_token_count(inpbox, outbox): #used in main window\r\n print(\"Updating token counts...\")\r\n inptokens = len(tokenizer.encode(inpbox))\r\n print(str(inptokens) + \" tokens in input box\")\r\n outtokens = len(tokenizer.encode(outbox))\r\n print(str(outtokens) + \" tokens in output box\")\r\n return inptokens, outtokens\r\n\r\n# generates new text after making sure that back memory, new inputs and old inputs/outputs are formatted correctly\r\ndef generate(new_text, old_text):\r\n if len(back_memory)>0:\r\n memory = back_memory + '\\n'\r\n memtokens = tokenizer.encode(memory)\r\n prompttemp = memory + old_text + \" \" + new_text\r\n else:\r\n prompttemp = old_text + \" \" + new_text\r\n prefixtokens = tokenizer.encode(prompttemp)\r\n maxlen = len(prefixtokens) + int(outlen)\r\n minlen = len(prefixtokens)\r\n\r\n prefix_token_limit = 2048 - outlen\r\n print(\"Prefix token limit: \"+str(prefix_token_limit))\r\n\r\n print(\"Given prompt:\\n\" + prompttemp)\r\n if minlen > prefix_token_limit:\r\n print(\"Too many tokens! Over max limit by \" + str(len(prefixtokens)-prefix_token_limit))\r\n leftover_tokens = prefixtokens[0:len(prefixtokens) - prefix_token_limit]\r\n if len(back_memory) > 0: # memory\r\n prefixtokens = prefixtokens[len(leftover_tokens)+len(memtokens):len(prefixtokens)]\r\n else: # no memory\r\n prefixtokens = prefixtokens[len(leftover_tokens):len(prefixtokens)]\r\n \r\n\r\n print(prefixtokens)\r\n prompttemp = tokenizer.decode(prefixtokens)\r\n print(\"Actual prompt being sent to the model:\\n\" + prompttemp)\r\n gen_text = ai.generate_one(prompt = prompttemp,\r\n min_length = minlen,\r\n max_length = maxlen,\r\n temperature = outtemp,\r\n repetition_penalty = outreppen,\r\n length_penalty = outlenpen,\r\n top_k = outtopk,\r\n top_p = outtopp\r\n )\r\n print(\"Generated: \" + gen_text)\r\n torch.cuda.empty_cache()\r\n if len(prompttemp)>0:\r\n final_gen_text = gen_text[len(prompttemp)-1:]\r\n else:\r\n final_gen_text = gen_text\r\n final_gen_text = cut_trailing_sentence(final_gen_text)\r\n print(\"Final Generated text: \" + final_gen_text)\r\n return final_gen_text\r\n\r\n#instance the given model\r\ndef instance_neo(m):\r\n valid_choice = False\r\n while valid_choice == False:\r\n use_gpu = input(\"Use GPU? Y/N: \") # requires cuda\r\n if use_gpu.lower() == \"y\":\r\n use_gpu = True\r\n print(\"Using GPU. Beware of OOM.\")\r\n valid_choice = True\r\n elif use_gpu.lower() == \"n\":\r\n use_gpu = False\r\n print(\"Not using GPU. Expect slow responses.\")\r\n valid_choice = True\r\n continue\r\n valid_choice = False\r\n while valid_choice == False:\r\n use_fp16 = input(\"Run at half-precision? Y/N: \") # requires using a gpu\r\n if use_fp16.lower() == \"y\":\r\n use_fp16 = True\r\n print(\"Using FP16. Expect the occasional random token.\")\r\n valid_choice = True\r\n elif use_fp16.lower() == \"n\":\r\n use_fp16 = False\r\n print(\"Not using FP16. 
Expect higher RAM/VRAM usage.\")\r\n valid_choice = True\r\n continue\r\n print(\"Instancing model...\")\r\n global model_name, ai, tokenizer\r\n if m == 1:\r\n print(\"Instancing GPT-NEO 125M!\")\r\n model_name = 'EleutherAI/gpt-neo-125M'\r\n elif m == 2:\r\n print(\"Instancing GPT-NEO 1.3B!\")\r\n model_name = 'EleutherAI/gpt-neo-1.3B'\r\n elif m == 3:\r\n print(\"Instancing GPT-NEO 2.7B!\")\r\n model_name = 'EleutherAI/gpt-neo-2.7B'\r\n elif m == 4:\r\n model_name = 'TensorFlow GPT-2-124M'\r\n print(\"Instancing GPT-2 124M!\")\r\n ai = aitextgen(tf_gpt2='124M', to_gpu=use_gpu, to_fp16=use_fp16, cache_dir='./models/gpt2-124m')\r\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\n elif m == 5:\r\n model_name = 'TensorFlow GPT-2-355M'\r\n print(\"Instancing GPT-2 355M!\")\r\n ai = aitextgen(tf_gpt2='355M', to_gpu=use_gpu, to_fp16=use_fp16, cache_dir='./models/gpt2-355m')\r\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\n elif m == 6:\r\n model_name = 'TensorFlow GPT-2-774M'\r\n print(\"Instancing GPT-2 774M!\")\r\n ai = aitextgen(tf_gpt2='774M', to_gpu=use_gpu, to_fp16=use_fp16, cache_dir='./models/gpt2-774m')\r\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\n elif m == 7:\r\n model_name = 'TensorFlow GPT-2-1558M'\r\n print(\"Instancing GPT-2 1558M!\")\r\n ai = aitextgen(tf_gpt2='1558M', to_gpu=use_gpu, to_fp16=use_fp16, cache_dir='./models/gpt2-1558m')\r\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\n elif m == 8:\r\n model_name = \"Custom GPT-2 model\"\r\n print(\"Instancing custom model!\")\r\n ai = aitextgen(model_folder = './models/custom', to_gpu=use_gpu, to_fp16=use_fp16, cache_dir = './models')\r\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\r\n elif m == 9:\r\n model_name = input(\"Enter HF Model name in format username/modelname\\nExample: 'EleutherAI/gpt-neo-125M'\\n> \")\r\n print(\"Instancing HF model!\")\r\n ai = aitextgen(model = model_name, to_gpu=use_gpu, to_fp16=use_fp16, cache_dir = './models/huggingface')\r\n tokenizer = GPT2Tokenizer.from_pretrained(model_name)\r\n if ai == None:\r\n ai = aitextgen(model = model_name, to_gpu=use_gpu, to_fp16=use_fp16, cache_dir='./models/' + model_name)\r\n tokenizer = GPT2Tokenizer.from_pretrained(model_name)\r\n return model_name\r\n\r\n# choosing the model...\r\nvalid_choice = False\r\nwhile valid_choice == False:\r\n m = int(input(\"1) GPT-NEO 125M\\n2) GPT-NEO 1.3B\\n3) GPT-NEO 2.7B\\n4) GPT-2 124M\\n5) GPT-2 355M\\n6) GPT-2 774M\\n7) GPT-2 1558M\\n8) Custom GPT-2 Model (local, put files in the folder 'models/custom')\\n9) Custom HF Model\\nChoose a model...\\n> \"))\r\n if m in range(1,10):\r\n m = instance_neo(m)\r\n valid_choice = True\r\n else:\r\n print(\"Invalid model.\")\r\n continue\r\n\r\n# main window, theme code can be very easily swapped for just using the black & white theme instead\r\ntheme_background_color('black')\r\ntheme_text_element_background_color('black')\r\ntheme_button_color('gray')\r\ntheme_text_element_background_color('black')\r\ntheme_element_text_color('white')\r\ntheme_text_color('white')\r\ntheme_slider_color('white')\r\n\r\n# options section\r\nopts_visible = False\r\n\r\noptions_layout = [ [sg.Text('Temperature'), sg.Slider(range = (0.01, 10.0), default_value = outtemp, resolution = 0.01, orientation = 'h', key = '-TEMP-', enable_events = True, size = (80, 16))],\r\n [sg.Text('Length'),sg.Slider(range = (24, 1200), default_value = outlen, resolution = 1, orientation = 'h', key = '-LENG-', enable_events = True, size = (120, 16))],\r\n [sg.Text('Repetition 
Penalty'),sg.Slider(range = (0.01, 10.0), default_value = outreppen, resolution = 0.01, orientation = 'h', key = '-REPPEN-', enable_events = True, size = (80, 16))],\r\n [sg.Text('Length Penalty'),sg.Slider(range = (0.01, 10.0), default_value = outlenpen, resolution = 0.01, orientation = 'h', key = '-LENPEN-', enable_events = True, size = (80, 16))],\r\n [sg.Text('Top K'),sg.Slider(range = (0, 100), default_value = outtopk, resolution = 1, orientation = 'h', key = '-TOPK-', enable_events = True, size = (30, 16)), sg.Text('Top P'),sg.Slider(range = (0.01, 1.0), default_value = outtopk, resolution = 0.01, orientation = 'h', key = '-TOPP-', enable_events = True, size = (30, 16))] ]\r\n\r\nmain_layout = [ [sg.Text('Yep')],\r\n [sg.Multiline(size = (180,24), key = '-OUTPUT-', text_color='white', background_color='black', autoscroll = True, enable_events = True)],\r\n [sg.Button('Clear'), sg.Button('Options'), sg.Button('Context'), sg.Text('Input box tokens: 0 / Output box tokens: 0', key = '-TOKENCOUNT-')],\r\n [sg.Button('Go', size = (8, 4), key = '-GO-'), sg.Frame(title = 'Options', layout = options_layout, visible = opts_visible, background_color = 'black', key = '-OPTIONS-'), sg.Multiline(size = (160, 4), key = '-INPUT-', text_color='white', background_color='black', autoscroll = True, enable_events = True)] ]\r\n\r\n# open the main window, do events relating to relevant buttons\r\ndef main_loop():\r\n global opts_visible\r\n global outtemp\r\n global outlen\r\n global outreppen\r\n global outlenpen\r\n global outtopk\r\n global outtopp\r\n global back_memory\r\n global aid_style\r\n main_window, cont_win = sg.Window('Main Window', main_layout, finalize = True), None\r\n main_window['-INPUT-'].set_cursor(cursor_color = 'white')\r\n main_window['-OUTPUT-'].set_cursor(cursor_color = 'white')\r\n while True:\r\n window, event, values = sg.read_all_windows()\r\n # window closes\r\n if event == sg.WIN_CLOSED: # if user closes window\r\n window.close()\r\n print(\"Window closed!\")\r\n if window == cont_win: # if closing win 2, mark as closed\r\n cont_win = None\r\n elif window == main_window: # if closing win 1, exit program\r\n break\r\n # generate more stuff\r\n elif event == '-GO-':\r\n print(\"Generate button pressed\")\r\n print(values['-OUTPUT-'].rstrip() + \" \" + values['-INPUT-'].rstrip())\r\n print(values['-INPUT-'].rstrip())\r\n pref = values['-INPUT-'].rstrip()\r\n print(values['-OUTPUT-'].rstrip())\r\n if aid_style == True:\r\n pref = \"> You \" + pref\r\n new_gen = generate(pref, values['-OUTPUT-'].rstrip())\r\n if values['-OUTPUT-'].rstrip() == \"\":\r\n main_window['-OUTPUT-'].update(values['-INPUT-'].rstrip() + new_gen)\r\n elif values['-INPUT-'].rstrip() == \"\":\r\n main_window['-OUTPUT-'].update(values['-OUTPUT-'].rstrip() + new_gen)\r\n else:\r\n main_window['-OUTPUT-'].update(values['-OUTPUT-'].rstrip() + \" \" + values['-INPUT-'].rstrip() + new_gen)\r\n main_window['-INPUT-'].update('')\r\n # clear output history\r\n elif event == 'Clear':\r\n print(\"Clear button pressed\")\r\n if popup_yes_no(\"This will clear the output history.\\nAre you sure?\",title=\"Confirm Clear\", keep_on_top = True) == 'Yes':\r\n main_window['-INPUT-'].update('')\r\n main_window['-OUTPUT-'].update('')\r\n # toggle options visibility\r\n elif event == 'Options':\r\n print(\"Options button pressed\")\r\n if opts_visible:\r\n opts_visible = False\r\n main_window['-INPUT-'].update(visible = True)\r\n else:\r\n opts_visible = True\r\n main_window['-INPUT-'].update(visible = False)\r\n 
main_window['-OPTIONS-'].update(visible = opts_visible)\r\n # open context window\r\n elif event == 'Context' and not cont_win:\r\n print(\"Context button pressed\")\r\n cont_win = context_window()\r\n # save context\r\n elif event == '-SAVECTX-':\r\n print(\"Saving memory...\")\r\n back_memory = values['-MEMORY-'].rstrip()\r\n\r\n # options\r\n elif event == '-TEMP-':\r\n outtemp = values['-TEMP-']\r\n print(\"Temperature updated to \" + str(outtemp))\r\n elif event == '-LENG-':\r\n outlen = int(values['-LENG-'])\r\n print(\"Length updated to \" + str(outlen))\r\n elif event == '-REPPEN-':\r\n outreppen = values['-REPPEN-']\r\n print(\"Rep penalty updated to \" + str(outreppen))\r\n elif event == '-LENPEN-':\r\n outlenpen = values['-LENPEN-']\r\n print(\"Length penalty updated to \" + str(outlenpen))\r\n elif event == '-TOPK-':\r\n outtopk = int(values['-TOPK-'])\r\n print(\"Top K updated to \" + str(outtopk))\r\n elif event == '-TOPP-':\r\n outtopp = values['-TOPP-']\r\n print(\"Top P updated to \" + str(outtopp))\r\n \r\n # update token counts\r\n if window == main_window:\r\n inptokens, outtokens = get_token_count(values['-INPUT-'].rstrip(), values['-OUTPUT-'].rstrip())\r\n new_text = 'Input box tokens: ' + str(inptokens) + ' / Output box tokens: ' + str(outtokens)\r\n print(new_text)\r\n main_window['-TOKENCOUNT-'].update(value = new_text) # unstable for some reason, the text sometimes just shows up as a single /. not sure why\r\n elif window == cont_win:\r\n ctxtokens = get_context_token_count(values['-MEMORY-'].rstrip())\r\n new_text_2 = 'Memory tokens: ' + str(ctxtokens)\r\n print(new_text_2)\r\n cont_win['-CTXTOKENS-'].update(value = new_text_2) # same as main window's token count\r\n \r\nmain_loop()" } ]
2
kul2002il/rkisQuestions
https://github.com/kul2002il/rkisQuestions
b07e48d36a96fff14f7250ef426cea260a8f9aea
50f957f547ec0dddf7391be3ce4d01ce50ee5ee9
ac8428f26d0f8c89523a6550dd0e8ee74228fd3e
refs/heads/master
2023-01-03T20:07:16.996373
2020-11-02T14:58:19
2020-11-02T14:58:19
309,366,700
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7432432174682617, "alphanum_fraction": 0.7432432174682617, "avg_line_length": 41.35714340209961, "blob_id": "5732c3896744e5845afe8425cbd452e7ce47f4dd", "content_id": "23482166a138bd04bf96d88d72b20907a6e0968d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 592, "license_type": "no_license", "max_line_length": 102, "num_lines": 14, "path": "/main/urls.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\nfrom .views import BBLoginView, BBLogoutView, BBPasswordChangeView, RegisterDoneView, RegisterUserView\n\napp_name = 'main'\n\nurlpatterns = [\n\tpath('', views.index),\n\tpath('accounts/login/', BBLoginView.as_view(), name=\"login\"),\n\tpath('accounts/register/', RegisterUserView.as_view(), name='register'),\n\tpath('accounts/register/done/', RegisterDoneView.as_view(), name='register_done'),\n\tpath('accounts/password/change/', BBPasswordChangeView.as_view(), name='password_change'),\n\tpath('accounts/logout/', BBLogoutView.as_view(), name=\"logout\"),\n]" }, { "alpha_fraction": 0.5388127565383911, "alphanum_fraction": 0.6095890402793884, "avg_line_length": 23.33333396911621, "blob_id": "fc6362e44ccd2db0dcc7d8cb0417e5dbd74c1490", "content_id": "ed2d0db38850b0e0ccd875f56c302892bbd0a2e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 452, "license_type": "no_license", "max_line_length": 101, "num_lines": 18, "path": "/main/migrations/0006_auto_20201102_1214.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2020-11-02 12:14\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0005_auto_20201102_1213'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='question',\n name='datetime',\n field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата публикации'),\n ),\n ]\n" }, { "alpha_fraction": 0.5317460298538208, "alphanum_fraction": 0.5888888835906982, "avg_line_length": 26.39130401611328, "blob_id": "9287bb54c6a665ca6e866a0f318e03e6a69ab8e4", "content_id": "37aa08137ce17655b09d8dfb98aa64ee64f54e5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 103, "num_lines": 23, "path": "/main/migrations/0003_auto_20201102_1211.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2020-11-02 12:11\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0002_auto_20201102_1210'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='question',\n name='datatime2',\n field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Момент публикации'),\n ),\n migrations.AddField(\n model_name='question',\n name='title2',\n field=models.CharField(default='', max_length=255, verbose_name='Название'),\n ),\n ]\n" }, { "alpha_fraction": 0.4863387942314148, "alphanum_fraction": 0.5710382461547852, "avg_line_length": 19.33333396911621, "blob_id": "3c8e1dff2835b23b130a89ee8189cf406babd1dc", "content_id": "7e3cd2d4a2dcbf2a075014fdf5822dfb56e07507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 366, "license_type": "no_license", "max_line_length": 
47, "num_lines": 18, "path": "/main/migrations/0005_auto_20201102_1213.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2020-11-02 12:13\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0004_auto_20201102_1212'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='question',\n old_name='datatime',\n new_name='datetime',\n ),\n ]\n" }, { "alpha_fraction": 0.7275902032852173, "alphanum_fraction": 0.7380675077438354, "avg_line_length": 25.78125, "blob_id": "4f59244af26896214b9515a7e6f37334c98abae5", "content_id": "9c2b8b34c041a4234e0554d7a0a238769cb0ae01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 893, "license_type": "no_license", "max_line_length": 94, "num_lines": 32, "path": "/main/models.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\n\nclass User(AbstractUser):\n\tdef Meta(self):\n\t\tpass\n\n\nclass Question(models.Model):\n\ttitle = models.CharField(max_length=255, verbose_name=\"Название\", default='')\n\tdatetime = models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата публикации')\n\ttext = models.CharField(max_length=255, verbose_name=\"Текст\", default='')\n\n\tdef __str__(self):\n\t\treturn self.title\n\n\nclass Answer(models.Model):\n\tquestion = models.ForeignKey(Question, on_delete=models.CASCADE)\n\ttext = models.CharField(max_length=255, verbose_name=\"Вариант\", default='')\n\n\tdef __str__(self):\n\t\treturn self.text\n\n\nclass Voice(models.Model):\n\tanswer = models.ForeignKey(Answer, on_delete=models.CASCADE)\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE)\n\n\tdef __str__(self):\n\t\treturn self.text\n\n\n" }, { "alpha_fraction": 0.786199688911438, "alphanum_fraction": 0.7903816103935242, "avg_line_length": 30.883333206176758, "blob_id": "95131abd224c4fb801ab131041587fa208ac992f", "content_id": "e62b8fb5824ef3d688040039924bdd006ac8aaea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1970, "license_type": "no_license", "max_line_length": 88, "num_lines": 60, "path": "/main/views.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic import UpdateView, CreateView\nfrom django.views.generic.base import TemplateView\nfrom django.contrib.auth.views import LoginView, LogoutView, PasswordChangeView\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.urls import reverse_lazy\n\nfrom .models import Question, User\nfrom .forms import RegisterUserForm, ChangeUserInfoForm\n\n\ndef index(request):\n\tque = Question.objects.all()[:10]\n\tcontext = {\"questions\": que}\n\treturn render(request, 'main/index.html', context=context)\n\n\nclass BBLoginView(LoginView):\n\ttemplate_name = 'main/login.html'\n\n\nclass BBLogoutView(LoginRequiredMixin, LogoutView):\n\ttemplate_name = 'main/logout.html'\n\n\nclass ChangeUserInfoView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):\n\tmodel = User\n\ttemplate_name = 'main/changeUserInfo.html'\n\tform_class = ChangeUserInfoForm\n\tsuccess_url = 
reverse_lazy('myApp:profile')\n\tsuccess_message = 'Личные данные пользователя изменены'\n\n\tdef dispatch(self, request, *args, **kwargs):\n\t\tself.user_id = request.user.pk\n\t\treturn super().dispatch(request, *args, **kwargs)\n\n\tdef get_object(self, queryset=None):\n\t\tif not queryset:\n\t\t\tqueryset = self.get_queryset()\n\t\treturn get_object_or_404(queryset, pk=self.user_id)\n\n\nclass BBPasswordChangeView(SuccessMessageMixin, LoginRequiredMixin, PasswordChangeView):\n\ttemplate_name = 'main/passwordChange.html'\n\tsuccess_url = reverse_lazy('myApp:profile')\n\tsuccess_message = 'Пароль пользователя изменен'\n\n\nclass RegisterUserView(CreateView):\n\tmodel = User\n\ttemplate_name = 'main/registerUser.html'\n\tform_class = RegisterUserForm\n\tsuccess_url = reverse_lazy('myApp:login')\n\n\nclass RegisterDoneView(TemplateView):\n\ttemplate_name = 'main/registerDone.html'\n" }, { "alpha_fraction": 0.5130168199539185, "alphanum_fraction": 0.5635528564453125, "avg_line_length": 24.115385055541992, "blob_id": "1d465f362c61825508af98b099e6cfb942a1db3c", "content_id": "600b072ee90eba3440f40a58cc7646983f09099f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 103, "num_lines": 26, "path": "/main/migrations/0004_auto_20201102_1212.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.2 on 2020-11-02 12:12\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0003_auto_20201102_1211'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='question',\n name='datatime2',\n ),\n migrations.RemoveField(\n model_name='question',\n name='title2',\n ),\n migrations.AlterField(\n model_name='question',\n name='datatime',\n field=models.DateTimeField(auto_now_add=True, null=True, verbose_name='Момент_публикации'),\n ),\n ]\n" }, { "alpha_fraction": 0.8061224222183228, "alphanum_fraction": 0.8061224222183228, "avg_line_length": 23.5, "blob_id": "a6e2e9c28692753aacea9f618bbc93cf317c164e", "content_id": "55b412da7c00659a594a94b1c7456f2eb499f93c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 49, "num_lines": 8, "path": "/main/admin.py", "repo_name": "kul2002il/rkisQuestions", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import User, Question, Voice, Answer\n\n\nadmin.site.register(User)\nadmin.site.register(Question)\nadmin.site.register(Voice)\nadmin.site.register(Answer)\n" } ]
8
kimhalyn/Bigdata
https://github.com/kimhalyn/Bigdata
a1386fd607cfd10feaceb31d5d2e1b01e3e9a997
e9d43bd3bab3cf3060713cef2540e388154f9489
270b77a4ed7da24e66f4aed6269999a803e6280b
refs/heads/master
2023-07-25T20:48:20.651276
2021-09-07T07:49:36
2021-09-07T07:49:36
324,935,160
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6097561120986938, "alphanum_fraction": 0.6536585092544556, "avg_line_length": 14.692307472229004, "blob_id": "1c103b1f76a5877a2334f8dd54b5d0e9218a1907", "content_id": "aa0afd96cd037af11eab3f5b4dfda833afc5aac1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "no_license", "max_line_length": 49, "num_lines": 13, "path": "/Ch02/2_1_request.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 파이썬 HTML 요청 실습하기\n\"\"\"\nimport urllib.request as req\n\n# 네이버 페이지 요청\nresponse = req.urlopen('http://naver.com').read()\nhtml = response.decode('utf-8')\n\n# 요청페이지 출력\nprint(html)\n\n" }, { "alpha_fraction": 0.6303191781044006, "alphanum_fraction": 0.6781914830207825, "avg_line_length": 17.799999237060547, "blob_id": "a4b95c1a0c2e867c88bd2a85c081d332926886d6", "content_id": "8e579940bdd5b0f41f315df157701f0c773237d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 59, "num_lines": 20, "path": "/Ch02/2_3_daum_news_ranking.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 다음 뉴스 파싱(parsing)하기\n\"\"\"\n\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\n\n# 다음 뉴스 페이지 요청\nresponse = req.get('https://news.daum.net/ranking/popular')\n\n\n# 다음 뉴스 페이지 1~10위까지 파싱\ndom = bs(response.text, 'html.parser')\ntitles = dom.select('ul.list_news2 div.cont_thumb a')\n\n# 1~10위까지 데이터 출력\nfor i in range(10):\n print(titles[i].text)\n" }, { "alpha_fraction": 0.5873016119003296, "alphanum_fraction": 0.6259739995002747, "avg_line_length": 30.5, "blob_id": "5118c52e46f52dca71bb26f0704921ad4797bffb", "content_id": "19ae95069f9d446f6b43d8e43ec38baf598fb16a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3773, "license_type": "no_license", "max_line_length": 127, "num_lines": 110, "path": "/Ch02/2_9_weather_to_db.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/30\n이름 : 김하린\n내용 : 파이썬 기상청 날씨 데이터 수집 DB 저장하기\n\"\"\"\nimport os\nimport requests as req\nimport pymysql as mysql\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\nfrom selenium import webdriver\n\n# 리눅스용 Chrome 브라우저 설치\n# 크롬 가상 웹브라우저 실행(headless 모드)\nchrome_option = webdriver.ChromeOptions()\nchrome_option.add_argument('--headless')\nchrome_option.add_argument('--no-sandbox')\nchrome_option.add_argument('--disable-dev-shm-usage')\nbrowser = webdriver.Chrome('./chromedriver.exe', options=chrome_option)\nbrowser.implicitly_wait(3)\n\nbrowser.get('https://www.weather.go.kr/w/weather/now.do')\nbrowser.implicitly_wait(3)\n\ntrs = browser.find_elements_by_css_selector('#sfc-city-weather > div.cont-box02 > div > div.cont02 > div > table > tbody > tr')\n\n\n# 테이블 생성\n# 테이블명 구하기\ntable_name = \"{:%Y-%m-%d}\".format(datetime.now())\n\n# 1단계 - 데이터베이스 접속\nconn = mysql.connect(host='192.168.56.104',\n user='root',\n password='1234',\n db='weather',\n charset='utf8')\n\n# 2단계 - SQL 실행객체\ncursor = conn.cursor()\n\n# 3단계 - SQL 실행\nsql = \"CREATE TABLE IF NOT EXISTS `%s` (\"\nsql += \"`col1` VARCHAR(10) COMMENT '지역',\"\nsql += \"`col2` VARCHAR(10) COMMENT '현재일기',\"\nsql += \"`col3` VARCHAR(10) COMMENT '시정',\"\nsql += \"`col4` TINYINT COMMENT '운량', \"\nsql += \"`col5` TINYINT COMMENT '중하운량',\"\nsql += \"`col6` DOUBLE COMMENT 
'현재온도',\"\nsql += \"`col7` DOUBLE COMMENT '이슬점온도',\"\nsql += \"`col8` DOUBLE COMMENT '체감온도',\"\nsql += \"`col9` DOUBLE COMMENT '일강수',\"\nsql += \"`col10` DOUBLE COMMENT '적설',\"\nsql += \"`col11` TINYINT COMMENT '습도',\"\nsql += \"`col12` VARCHAR(10) COMMENT '풍향', \"\nsql += \"`col13` DOUBLE COMMENT '풍속',\"\nsql += \"`col14` DOUBLE COMMENT '해면기압',\"\nsql += \"`rdate` DATETIME COMMENT '수집일'\"\nsql += \");\"\n\ncursor.execute(sql % table_name)\n\n\n# 테이블 저장(INSERT)\nsql_insert = \"INSERT INTO `\"+table_name+\"` SET \"\nsql_insert += \"`col1`='%s',\"\nsql_insert += \"`col2`='%s',\"\nsql_insert += \"`col3`='%s',\"\nsql_insert += \"`col4`='%s',\"\nsql_insert += \"`col5`='%s',\"\nsql_insert += \"`col6`='%s',\"\nsql_insert += \"`col7`='%s',\"\nsql_insert += \"`col8`='%s',\"\nsql_insert += \"`col9`='%s',\"\nsql_insert += \"`col10`='%s',\"\nsql_insert += \"`col11`='%s',\"\nsql_insert += \"`col12`='%s',\"\nsql_insert += \"`col13`='%s',\"\nsql_insert += \"`col14`='%s',\"\nsql_insert += \"`rdate`=NOW()\"\n\nfor tr in trs:\n v1 = tr.find_element_by_css_selector('td:nth-child(1) > a').text\n v2 = tr.find_element_by_css_selector('td:nth-child(2)').text\n v3 = tr.find_element_by_css_selector('td:nth-child(3)').text\n v4 = tr.find_element_by_css_selector('td:nth-child(4)').text\n v5 = tr.find_element_by_css_selector('td:nth-child(5)').text\n v6 = tr.find_element_by_css_selector('td:nth-child(6)').text\n v7 = tr.find_element_by_css_selector('td:nth-child(7)').text\n v8 = tr.find_element_by_css_selector('td:nth-child(8)').text\n v9 = tr.find_element_by_css_selector('td:nth-child(9)').text\n v10 = tr.find_element_by_css_selector('td:nth-child(10)').text\n v11 = tr.find_element_by_css_selector('td:nth-child(11)').text\n v12 = tr.find_element_by_css_selector('td:nth-child(12)').text\n v13 = tr.find_element_by_css_selector('td:nth-child(13)').text\n v14 = tr.find_element_by_css_selector('td:nth-child(14)').text\n\n\n cursor.execute(sql_insert % (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14))\n conn.commit()\n\n# 4단계 - 데이터베이스 종료\nconn.close()\n\n\n# 브라우저 종료\nbrowser.close()\nbrowser.quit()\n\nprint('날씨 데이터 INSERT 완료')\n" }, { "alpha_fraction": 0.6720720529556274, "alphanum_fraction": 0.6954954862594604, "avg_line_length": 19.55555534362793, "blob_id": "70a1a5105960800534e3aa40ea68b5ede108e277", "content_id": "4b55f4bf9ddcde228394899b0096c4afaab948cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 673, "license_type": "no_license", "max_line_length": 62, "num_lines": 27, "path": "/Ch02/2_6_selenium.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 파이썬 가상웹브라우저 실습하기\n\"\"\"\nfrom selenium import webdriver\n\n# 크롬 가상 브라우저 실행\nbrowser = webdriver.Chrome('./chromedriver.exe')\n\n# 네이버 접속\nbrowser.get('http://naver.com')\n\n# 로그인 버튼 클릭\na_login = browser.find_element_by_css_selector('#account > a')\na_login.click()\n\n# 아이디, 비밀번호 입력\ninput_id = browser.find_element_by_css_selector('#id')\ninput_pw = browser.find_element_by_css_selector('#pw')\n\ninput_id.send_keys('abcde')\ninput_pw.send_keys('12345')\n\n# 로그인 클릭\ninput_submit = browser.find_element_by_id('log.login')\ninput_submit.click()\n" }, { "alpha_fraction": 0.4571428596973419, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 9.142857551574707, "blob_id": "8a2d7678bc2b71a26c4e12aa91534853af345dbb", "content_id": "b5666638d8c66882312c99c6be4a511f78a5cfba", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/Ch01/1_1_hello.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 빅데이터 개요\n\"\"\"\n\nprint('Hello Bigdata!')" }, { "alpha_fraction": 0.4836065471172333, "alphanum_fraction": 0.6034836173057556, "avg_line_length": 36.46154022216797, "blob_id": "d4f704dd245a1c56f07eb3ca166cbfd0e7431647", "content_id": "41fb3c5df4293d835343b6da48e771adac3d1287", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1100, "license_type": "no_license", "max_line_length": 124, "num_lines": 26, "path": "/Ch03/3_1_mongodb.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2021/01/04\n이름 : 김하린\n내용 : 파이썬 Mongodb 프로그래밍\n\"\"\"\n\nfrom pymongo import MongoClient as mongo\nfrom _datetime import datetime\n\n# 1단계 - mongodb 접속\nconn = mongo('mongodb://khl:[email protected]:27017')\n\n# 2단계 - DB 선택\ndb = conn.get_database('khl')\n\n# 3단계 - collection(table) 선택\ncollection = db.get_collection('member')\n\n# 4단계 - 쿼리 실행\ncollection.insert_one({'uid':'a101', 'name':'김유신', 'hp':'010-1234-1111', 'pos':'사원', 'dep':101, 'rdate': datetime.now()})\ncollection.insert_one({'uid':'a102', 'name':'김춘추', 'hp':'010-1234-2222', 'pos':'과장', 'dep':103, 'rdate': datetime.now()})\ncollection.insert_one({'uid':'a103', 'name':'장보고', 'hp':'010-1234-3333', 'pos':'대리', 'dep':102, 'rdate': datetime.now()})\ncollection.insert_many([{'uid':'a104', 'name':'강감찬', 'hp':'010-1234-4444', 'pos':'차장', 'dep':104, 'rdate': datetime.now()},\n {'uid':'a105', 'name':'이순신', 'hp':'010-1234-5555', 'pos':'부장', 'dep':105, 'rdate': datetime.now()}])\n\nprint('Insert 완료...')\n\n\n" }, { "alpha_fraction": 0.6763392686843872, "alphanum_fraction": 0.6882440447807312, "avg_line_length": 25.352941513061523, "blob_id": "bfe1bc2eaac673f691d3958ceae9d771a5b79bb7", "content_id": "de5cbbe5a820422194e51ba61a029846af30ab64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1522, "license_type": "no_license", "max_line_length": 122, "num_lines": 51, "path": "/Ch02/2_7_naver.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/29\n이름 : 김하린\n내용 : 네이버 실시간 검색어 수집하기\n\"\"\"\n\nimport os\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\nfrom datetime import datetime\n\n# 리눅스용 Chrome 브라우저 설치\n# 크롬 가상 웹브라우저 실행(headless 모드)\nchrome_option = webdriver.ChromeOptions()\nchrome_option.add_argument('--headless')\nchrome_option.add_argument('--no-sandbox')\nchrome_option.add_argument('--disable-dev-shm-usage')\nbrowser = webdriver.Chrome('./chromedriver.exe', options=chrome_option)\nbrowser.implicitly_wait(3)\n\n# 네이버 데이터랩 이동\nbrowser.get('https://datalab.naver.com/keyword/realtimeList.naver')\nbrowser.implicitly_wait(3)\n\n# 네이버 실검 1 ~ 10까지 파싱\nitem_boxs = browser.find_elements_by_css_selector('#content .selection_area .field_list ul:nth-child(1) > li > .item_box')\n\n# 디렉터리 생성\ndir = \"./naver/{:%Y-%m-%d}\".format(datetime.now())\n\nif not os.path.exists(dir):\n os.makedirs(dir)\n\n# 파일 저장\nfname = \"{:%y-%m-%d-%H-%M.txt}\".format(datetime.now())\nfile = open(dir+'/'+fname, mode='w', encoding='utf8')\nfile.write('순위,제목,날짜\\n')\n\nfor item_box in item_boxs:\n file.write('%s,' % item_box.find_element_by_css_selector('.item_num').text)\n file.write('%s,' % 
item_box.find_element_by_css_selector('.item_title').text)\n file.write('%s\\n' % \"{:%y%m%d%H%M%S}\".format(datetime.now()))\n\n\nfile.close()\n\n# 브라우저 종료\nbrowser.close()\n\nprint('수집완료...')\n" }, { "alpha_fraction": 0.6563981175422668, "alphanum_fraction": 0.6777251362800598, "avg_line_length": 16.54166603088379, "blob_id": "60f4ef80de0675937aa7aa8aac71a5bf730d4752", "content_id": "af521731d171a60c90af01ea755214e842ff005e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 548, "license_type": "no_license", "max_line_length": 49, "num_lines": 24, "path": "/Ch02/2_2_parsing.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 파이썬 HTML 페이지 파싱(parsing)하기\n\nparsing\n- 문서 해독을 의미하는 용어 \n- 마크업문서(HTML, XML)에서 특정 태그의 데이터를 추출하는 처리과정\n\"\"\"\n\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\n\n# HTML 요청\nresponse = req.get('https://news.daum.net/')\nprint(response.text)\n\n# HTML 파싱\ndom = bs(response.text, 'html.parser')\ntitles = dom.select('.list_issue .tit_thumb > a')\n\n# 데이터 출력\nfor tit in titles:\n print(tit.text)\n\n" }, { "alpha_fraction": 0.5752032399177551, "alphanum_fraction": 0.5975610017776489, "avg_line_length": 21.363636016845703, "blob_id": "080495a39038010f20a35c1c2e315959d0c806f6", "content_id": "61fc23f5b9e62f99166838d291303e8a9d371f06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 558, "license_type": "no_license", "max_line_length": 62, "num_lines": 22, "path": "/Ch02/2_4_xml_parsing.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 파이썬 XML 문서 파싱(parsing)하기\n\"\"\"\n\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\n\n# XML 요청\nresponse = req.get('https://www.w3schools.com/xml/simple.xml')\n\n# XML 파싱\ndom = bs(response.text, 'html.parser')\nfoods = dom.select('breakfast_menu > food')\n\n# 데이터 출력\nfor food in foods:\n print('-------------------------------------')\n print('이름 : ', food.findChildren()[0].text)\n print('가격 : ', food.price.text)\n print('열량 : ', food.calories.text)\n" }, { "alpha_fraction": 0.6353322267532349, "alphanum_fraction": 0.6580227017402649, "avg_line_length": 22.615385055541992, "blob_id": "dbe3bd638b1438c37ce6757c32c47b6405389088", "content_id": "f0416433622e0e39a73e55b6b25ae57a9ae086e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 729, "license_type": "no_license", "max_line_length": 74, "num_lines": 26, "path": "/Ch02/2_5_busan.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/28\n이름 : 김하린\n내용 : 파이썬 데이터 전송하기\n\"\"\"\n\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\n\n# 세션시작\nsess = req.session()\n\n# 부산일보 로그인 페이지 요청\nurl = 'https://here.busan.com/bbs/login_check.php'\nsess.post(url, data={'mb_id': 'ksb0503', 'mb_password': '123456789'})\n\n# 마이페이지 요청\nhtml = sess.get('https://here.busan.com/member/member_mypage.php')\n\n# HTML 파싱 후 데이터 출력\ndom = bs(html.text, 'html.parser')\nspan_id = dom.select_one('#design_contents > dl > dd > span.id')\npoint = dom.select_one('#design_contents > div.point > font:nth-child(1)')\n\nprint('아이디 : ', span_id.text)\nprint('포인트 : ', point.text)\n\n\n\n" }, { "alpha_fraction": 0.6287640333175659, "alphanum_fraction": 0.661573052406311, "avg_line_length": 33.230770111083984, "blob_id": "6f825c3bdd68bb10f7ba97b641ede336665a6a7d", "content_id": 
"0a67a66f3f67f460467955cea72e5a380fc23ae3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2463, "license_type": "no_license", "max_line_length": 138, "num_lines": 65, "path": "/Ch02/2_8_weather.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2020/12/30\n이름 : 김하린\n내용 : 파이썬 기상청 날씨 데이터 수집하기\n\"\"\"\nimport os\nimport requests as req\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\nfrom selenium import webdriver\n\n# 리눅스용 Chrome 브라우저 설치\n# 크롬 가상 웹브라우저 실행(headless 모드)\nchrome_option = webdriver.ChromeOptions()\nchrome_option.add_argument('--headless')\nchrome_option.add_argument('--no-sandbox')\nchrome_option.add_argument('--disable-dev-shm-usage')\nbrowser = webdriver.Chrome('./chromedriver.exe', options=chrome_option)\nbrowser.implicitly_wait(3)\n\nbrowser.get('https://www.weather.go.kr/w/weather/now.do')\nbrowser.implicitly_wait(3)\n\ntrs = browser.find_elements_by_css_selector('#sfc-city-weather > div.cont-box02 > div > div.cont02 > div > table > tbody > tr')\n\n# 디렉터리 생성\ndir = \"./weather/{:%Y-%m-%d}\".format(datetime.now())\n\nif not os.path.exists(dir):\n os.makedirs(dir)\n\n\n# 파일로 저장\nfname = \"{:%y-%m-%d-%H-%M-.txt}\".format(datetime.now())\nfile = open(dir+'/'+fname, mode='w', encoding='utf-8')\n\nfile.write('지점, 현재일기, 시정, 운량, 중하운량, 현재기온, 이슬점온도, 체감온도, 일강수, 적설, 습도, 풍향, 풍속, 해면기압\\n')\n\n\nfor tr in trs:\n v1 = tr.find_element_by_css_selector('td:nth-child(1) > a').text\n v2 = tr.find_element_by_css_selector('td:nth-child(2)').text\n v3 = tr.find_element_by_css_selector('td:nth-child(3)').text\n v4 = tr.find_element_by_css_selector('td:nth-child(4)').text\n v5 = tr.find_element_by_css_selector('td:nth-child(5)').text\n v6 = tr.find_element_by_css_selector('td:nth-child(6)').text\n v7 = tr.find_element_by_css_selector('td:nth-child(7)').text\n v8 = tr.find_element_by_css_selector('td:nth-child(8)').text\n v9 = tr.find_element_by_css_selector('td:nth-child(9)').text\n v10 = tr.find_element_by_css_selector('td:nth-child(10)').text\n v11 = tr.find_element_by_css_selector('td:nth-child(11)').text\n v12 = tr.find_element_by_css_selector('td:nth-child(12)').text\n v13 = tr.find_element_by_css_selector('td:nth-child(13)').text\n v14 = tr.find_element_by_css_selector('td:nth-child(14)').text\n\n file.write('%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\\n' % (v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14))\n\n# 파일 닫기\nfile.close()\n\n# 브라우저 종료\nbrowser.close()\nbrowser.quit()\n\nprint('날씨 데이터 수집 완료')\n" }, { "alpha_fraction": 0.4820936620235443, "alphanum_fraction": 0.5399448871612549, "avg_line_length": 22.419355392456055, "blob_id": "78784141ee822c45feeba4a93a528c1690a1ae00", "content_id": "a33789b9653bc7e48fb00589e7a5c895d0474fc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 796, "license_type": "no_license", "max_line_length": 81, "num_lines": 31, "path": "/Ch03/3_2_mongo_find.py", "repo_name": "kimhalyn/Bigdata", "src_encoding": "UTF-8", "text": "\"\"\"\n날짜 : 2021/01/04\n이름 : 김하린\n내용 : 파이썬 Mongodb 프로그래밍\n\"\"\"\n\nfrom pymongo import MongoClient as mongo\nfrom _datetime import datetime\n\n# 1단계 - mongodb 접속\nconn = mongo('mongodb://khl:[email protected]:27017')\n\n# 2단계 - DB 선택\ndb = conn.get_database('khl')\n\n# 3단계 - collection(table) 선택\ncollection = db.get_collection('member')\n\n# 4단계 - 쿼리 실행\nrs1 = collection.find()\n\nfor row in rs1:\n 
print('---------------------------------------')\n print('%s, %s, %s' % (row['uid'], row['name'], row['hp']))\n\n# select * from `member` where uid='a101'\nrs2 = collection.find({'uid':'a101'})\n\nfor row in rs2:\n print('---------------------------------------')\n print('%s, %s, %s, %s' % (row['name'], row['pos'], row['dep'], row['rdate']))\n" } ]
12
barraponto/python-pawn
https://github.com/barraponto/python-pawn
2f79e5e45b499083ee79c1fa9de24e289832a8eb
23418526e02765ef531f5e19b760770f36c06003
288ab0b55a1cd9091969cf5b90b6fc04ba90af6b
refs/heads/master
2021-01-13T02:07:33.184026
2015-03-24T05:29:45
2015-03-24T05:29:45
32,663,834
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5666666626930237, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 7.5714287757873535, "blob_id": "ea6a8d935ae6e8f22ec10a6facfcc96240fe4697", "content_id": "0dbf68433c55ca87b99ab3fc32081c204818920c", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 60, "license_type": "permissive", "max_line_length": 26, "num_lines": 7, "path": "/docs/usage.rst", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "=====\nUsage\n=====\n\nTo use Pawn in a project::\n\n\timport pawn\n" }, { "alpha_fraction": 0.6126126050949097, "alphanum_fraction": 0.6396396160125732, "avg_line_length": 17.5, "blob_id": "4cdead35993d79b114081353cc28a3f719e2f0bd", "content_id": "a7583dc674ebc71cadf0ce091ada52f8f9dd65b3", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 111, "license_type": "permissive", "max_line_length": 30, "num_lines": 6, "path": "/src/pawn/__init__.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "__version__ = \"0.1.0\"\n\nfrom pawn.game import Game\nfrom pawn.player import Player\n\n__all__ = ['Game', 'Player']\n" }, { "alpha_fraction": 0.5718954205513, "alphanum_fraction": 0.5784313678741455, "avg_line_length": 23.783782958984375, "blob_id": "dd328108362ba2e8eb139e405e0f32822e63ab3f", "content_id": "9cc6891bdb5d898a75c9fe2541ae85e70dd8e88e", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 924, "license_type": "permissive", "max_line_length": 77, "num_lines": 37, "path": "/tests/test_cards.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport itertools\n\nimport pytest\n\nfrom pawn.cards import Card, Deck\n\n\[email protected]('suit,rank',\n itertools.product(['♣', '♥'], ['A', '3']))\ndef test_card(suit, rank):\n card = Card(suit=suit, rank=rank)\n assert card.suit == suit\n assert card.rank == rank\n\[email protected]('suit,rank',\n [pytest.mark.xfail((None, None), raises=ValueError),\n pytest.mark.xfail(('♣', None), raises=ValueError),\n pytest.mark.xfail((None, '3'), raises=ValueError)])\ndef test_bad_card(suit, rank):\n assert Card(suit=suit, rank=rank)\n\ndef test_deck():\n deck = Deck()\n assert len(deck.cards) == 52\n\ndef test_deck_card_class():\n\n class TestCard(Card):\n pass\n\n class TestDeck(Deck):\n card_class = TestCard\n\n deck = TestDeck()\n assert type(deck.cards[0]) == TestCard\n\n" }, { "alpha_fraction": 0.6439957618713379, "alphanum_fraction": 0.6493092179298401, "avg_line_length": 25.885713577270508, "blob_id": "5f89915a18ef0d6dde7b599211184b9e6d69385a", "content_id": "9b6f7912312bf3290c69a3e7a8961d980c7574ab", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 941, "license_type": "permissive", "max_line_length": 69, "num_lines": 35, "path": "/tests/test_pawn.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "import pytest\n\nfrom pawn.game import Game\nfrom pawn.game import Player\n\n\[email protected]('number_of_players',\n [2, pytest.mark.xfail(3), 4])\ndef test_number_of_players(number_of_players):\n game = Game(number_of_players=number_of_players)\n assert len(game.players) == number_of_players\n assert repr(game) == 
\"Game(number_of_players={number})\".format(\n number=number_of_players)\n\[email protected]('name', ['Fox', 'Falco', 'Peppy', 'Slippy'])\ndef test_player_name(name):\n player = Player(name=name)\n assert player.name == name\n assert repr(player) == \"Player(name={name!r})\".format(name=name)\n\ndef test_game_player_class():\n\n class TestPlayer(Player):\n pass\n\n class TestGame(Game):\n player_class = TestPlayer\n\n game = TestGame()\n assert type(game.players[0]) == TestPlayer\n\n\ndef test_main():\n from pawn.__main__ import main\n assert main([]) == 0\n" }, { "alpha_fraction": 0.5824176073074341, "alphanum_fraction": 0.5824176073074341, "avg_line_length": 26.299999237060547, "blob_id": "9fe56920552a566ed7dc71eeea1eaefe282e24c3", "content_id": "534cc70b30df42130ab9dc7e82c12ca3c39654d0", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "permissive", "max_line_length": 61, "num_lines": 10, "path": "/src/pawn/player.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "class Player(object):\n \"\"\"Creates a named player for the game.\n\n :param name: a name string for the player.\n \"\"\"\n def __init__(self, name='John Doe'):\n self.name = name\n\n def __repr__(self):\n return 'Player(name={name!r})'.format(name=self.name)\n" }, { "alpha_fraction": 0.591549277305603, "alphanum_fraction": 0.597951352596283, "avg_line_length": 31.54166603088379, "blob_id": "fff847c9f5c2e532cd7cd1af143e510f51324801", "content_id": "258444d1abdcf8409e13b5c1f365822516406c2a", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 781, "license_type": "permissive", "max_line_length": 76, "num_lines": 24, "path": "/src/pawn/game.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "from pawn.player import Player\n\n\nclass Game(object):\n \"\"\"Creates an N-players game.\n\n :param number_of_players: the number of players for the current game.\n \"\"\"\n\n player_class = Player\n supported_number_of_players = [2, 4]\n\n def __init__(self, number_of_players=2):\n if number_of_players not in self.supported_number_of_players:\n raise ValueError(\n 'Unsupported number of players: {number}.'.format(\n number=number_of_players))\n else:\n self.players = [self.player_class(name='Player {number}'.format(\n number=n)) for n in range(1, 1 + number_of_players)]\n\n def __repr__(self):\n return 'Game(number_of_players={number!r})'.format(\n number=len(self.players))\n" }, { "alpha_fraction": 0.5010846257209778, "alphanum_fraction": 0.5162689685821533, "avg_line_length": 30.79310417175293, "blob_id": "6c8b7cca5491182d10577bff3e97ceec5f55634f", "content_id": "7567b81fadb3749836422644b62c45a1632c7c9d", "detected_licenses": [ "BSD-2-Clause", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 938, "license_type": "permissive", "max_line_length": 78, "num_lines": 29, "path": "/src/pawn/cards.py", "repo_name": "barraponto/python-pawn", "src_encoding": "UTF-8", "text": "# coding: utf-8\n\nimport itertools\n\n\nclass Card(object):\n \"\"\"Represents a playing card with suit and rank.\n\n :param suit: the card suit, usually one of ♠♥♦♣.\n :param rank: the card rank, usually one of A[2-10]JQK.\n \"\"\"\n def __init__(self, suit=None, rank=None):\n if not all([suit, rank]):\n raise ValueError('Card should have both suit and rank defined,'\n ' got 
{suit!r} and {rank!r}.'.format(\n suit=suit, rank=rank))\n self.suit = suit\n self.rank = rank\n\n\nclass Deck(object):\n '''The French standard playing card deck.'''\n card_class = Card\n suits = ['♠', '♥', '♦', '♣']\n ranks = ['A', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K']\n\n def __init__(self):\n self.cards = [self.card_class(suit, rank) for suit, rank\n in itertools.product(self.suits, self.ranks)]\n" } ]
7
AkashRamlal1/robotarm
https://github.com/AkashRamlal1/robotarm
787a3002624ebb967216bc1a5a5b508445d8b256
9b575f20daf7638e6e68115bd726e45b3077fd2a
6a7e9490655fcde3d0cf3b067de4bbf1df1fe46a
refs/heads/main
2023-08-27T17:20:58.503260
2021-11-01T11:12:46
2021-11-01T11:12:46
420,957,286
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5978090763092041, "alphanum_fraction": 0.6071987748146057, "avg_line_length": 28.045454025268555, "blob_id": "d548320e6ca498dd597122216d3e6fdb4cd4ed8e", "content_id": "0263b092b6b89a6324fd607d5f1694ecd16c2115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 106, "num_lines": 22, "path": "/example11.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 11')\n\n# Jouw python instructies zet je vanaf hier:\n\nkleur = \"white\"\n# Na jouw code wachten tot het sluiten van de window:\nfor blok in range(1):\n for blok in range(9):\n robotArm.moveRight();\n for blok in range(15):\n robotArm.grab()\n kleur = robotArm.scan() # de command robotArm.scan controleert of he blokje de gewenste kleur is \n print(kleur)\n if kleur == \"white\":\n robotArm.moveRight();\n robotArm.drop()\n robotArm.moveLeft();\n else:\n robotArm.drop()\n robotArm.moveLeft();\n" }, { "alpha_fraction": 0.6598984599113464, "alphanum_fraction": 0.6751269102096558, "avg_line_length": 23.6875, "blob_id": "8d163c4fe35d10b5c9cf4786bf6f995de88bd2d3", "content_id": "8a4994d6f38c5dcad7dbf29174f7a108f28cd328", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/example10.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 10')\n\n# Jouw python instructies zet je vanaf hier:\n\nafstand = 10\n# Na jouw code wachten tot het sluiten van de window:\nfor blok in range(5):\n afstand = afstand - 1\n for blok in range(afstand):\n robotArm.grab()\n robotArm.moveRight();\n robotArm.drop()\n for blok in range(afstand):\n robotArm.moveLeft();" }, { "alpha_fraction": 0.745945930480957, "alphanum_fraction": 0.7513513565063477, "avg_line_length": 14.5, "blob_id": "385f27b86ca43fdfdd856438f11917260e52654c", "content_id": "3470629c7f3c31f3eb0522730aa6add2938b92c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 53, "num_lines": 12, "path": "/example.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 1')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()" }, { "alpha_fraction": 0.59375, "alphanum_fraction": 0.6076388955116272, "avg_line_length": 26.4761905670166, "blob_id": "a472ddc7b9f84dd8dacc9c7b15691dea9b092ef6", "content_id": "830ad07e16a74e733ff2245d20adebc0c7350b2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 54, "num_lines": 21, "path": "/example13.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n# Let op: hier start het anders voor een random level:\nrobotArm = RobotArm()\nrobotArm.randomLevel(1,7)\n\n# Jouw python instructies zet je vanaf hier:\n\n# Na jouw code wachten tot het sluiten van de window:\nfor blok in range(0, 9):\n robotArm.grab()\n kleur = robotArm.scan()\n print(kleur)\n if kleur != \"\":\n for blokje in range(0, blok + 1):\n 
robotArm.moveRight();\n robotArm.drop()\n for blokje in range(0, blok + 1):\n robotArm.moveLeft();\n print(\"\")\n elif kleur == \"\":\n break" }, { "alpha_fraction": 0.4904458522796631, "alphanum_fraction": 0.4904458522796631, "avg_line_length": 25.33333396911621, "blob_id": "817df7b4f8538a99772447294ef3ba030aac3290", "content_id": "d3b574e5d0f80278d04a995a6b1efcd6f7baa91d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 157, "license_type": "no_license", "max_line_length": 32, "num_lines": 6, "path": "/tempCodeRunnerFile.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "robotArm.moveRight();\n robotArm.drop()\n robotArm.moveLeft();\n else:\n robotArm.drop()\n robotArm.moveLeft();" }, { "alpha_fraction": 0.644385039806366, "alphanum_fraction": 0.6577540040016174, "avg_line_length": 22.4375, "blob_id": "ecd6f655a4f1d469490d4719bdd53680c7598f88", "content_id": "3465984b8d58c1db486376015b81bc45ef7956a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 374, "license_type": "no_license", "max_line_length": 53, "num_lines": 16, "path": "/example8.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 8')\n\n# Jouw python instructies zet je vanaf hier:\nblok = 1\n\n# Na jouw code wachten tot het sluiten van de window:\nfor blok in range (7):\n for blok in range (9):\n robotArm.moveRight();\n robotArm.grab()\n robotArm.drop()\n for blok in range (8):\n robotArm.moveLeft();\n robotArm.grab()" }, { "alpha_fraction": 0.6449275612831116, "alphanum_fraction": 0.6594203114509583, "avg_line_length": 19.649999618530273, "blob_id": "ed7c673a4250280146eea0bdd65fd3914d9ca1b9", "content_id": "4a6f51768241a84f4a5d320f2a37f3e3394b67c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "no_license", "max_line_length": 53, "num_lines": 20, "path": "/example7.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 7')\nrobotArm.speed = 2\n\n\n# Jouw python instructies zet je vanaf hier\nblokje=1\n\n\n\n# Na jouw code wachten tot het sluiten van de window:\nfor blokje in range(5):\n for blokje in range(6):\n robotArm.moveRight();\n robotArm.grab()\n robotArm.moveLeft();\n robotArm.drop()\n for blokje in range(2):\n robotArm.moveRight();\n\n" }, { "alpha_fraction": 0.7368533611297607, "alphanum_fraction": 0.7545959949493408, "avg_line_length": 20.75813865661621, "blob_id": "f3167ab100fea60bf367039a79460190321aa325", "content_id": "d6f718e8ea4b9d40d1e457bbf96645ba1982ea47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4690, "license_type": "no_license", "max_line_length": 135, "num_lines": 215, "path": "/README.md", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "# Robotarm\n\nLees de instructies op de wiki pagina hoe de robotarm bibliotheek werkt. 
Vervolgens ga je proberen de onderstaande oefeningen te maken.\n\n## Oefening 1\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 1')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats het rode blok één plek naar links.\n\n![Oefening 1](readme/exercise1.png)\n\n## Oefening 2\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 2')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nStappel alle blokken op aan de rechterkant.\n\n![Oefening 2](readme/exercise2.png)\n\n## Oefening 3\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 3')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats de hele stapel blokken één plek naar rechts.\n\n![Oefening 3](readme/exercise3.png)\n\n## Oefening 4\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 4')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats de hele stapel blokken één plek naar rechts. Zorg ervoor dat de volgorde van de blokken gelijk blijft.\n\n![Oefening 4](readme/exercise4.png)\n\n## Oefening 6\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 6')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats alle blokken één plek naar rechts. Zorg ervoor dat de volgorde van de blokken gelijk blijft. \n\n![Oefening 6](readme/exercise6.png)\n\n## Oefening 7\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 7')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats iedere stapel één plek naar links.\n\nJe mag maximaal 11 regels code gebruiken inclusief de import, het laden van de robotarm en de wait\n\n![Oefening 7](readme/exercise7.png)\n\n## Oefening 8\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 8')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats de stapel naar de rechterkant.\n\nJe mag maximaal 11 regels code gebruiken inclusief de import, het laden van de robotarm en de wait\n\n![Oefening 8](readme/exercise8.png)\n\n## Oefening 9\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 9')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats alle stapels vijf stappen naar rechts.\n\nJe mag maximaal 12 regels code gebruiken inclusief de import, het laden van de robotarm en de wait\n\n![Oefening 9](readme/exercise9.png)\n\n## Oefening 10\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 10')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nDraai de volgorde van de blokken om.\n\nJe mag maximaal 15 regels code gebruiken inclusief de import, het laden van de robotarm en de wait\n\n![Oefening 10](readme/exercise10.png)\n\n## Oefening 11\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 11')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw 
code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats alle witte blokken één plek naar rechts. \n\nLet op, de blokken zijn iedere keer anders als je het programma start!\n\n![Oefening 11](readme/exercise11.png)\n\n## Oefening 12\n```python\nfrom RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 12')\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerplaats alle rode blokken naar het einde.\n\nLet op, de blokken zijn iedere keer anders als je het programma start!\n\n![Oefening 12](readme/exercise12.png)\n\n## Oefening 13\n```python\nfrom RobotArm import RobotArm\n# Let op: hier start het anders voor een random level:\nrobotArm = RobotArm()\nrobotArm.randomLevel(1,7)\n\n# Jouw python instructies zet je vanaf hier:\n\n\n# Na jouw code wachten tot het sluiten van de window:\nrobotArm.wait()\n```\nVerdeel alle blokken over de lege plaatsen, zodra er geen blokken meer zijn moet de arm stoppen.\n![Oefening 12](readme/exercise13-start.png)\n\nresultaat na verdeling ziet er zo uit:\n\n![Oefening 12](readme/exercise13-end.png)\n\n[Bonus opdrachten](https://www.dropbox.com/s/7q4o3xboi5whgop/RobotArm%20Puzzels.docx?dl=0)\n\nSucces!\n" }, { "alpha_fraction": 0.5833792090415955, "alphanum_fraction": 0.594639241695404, "avg_line_length": 35.97058868408203, "blob_id": "4c80e8b19ad309f07685be8f70af5ac5bc8f4176", "content_id": "3bd5c857c8a27b5bc80da4c4b7c8dc8ead1c214b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16341, "license_type": "no_license", "max_line_length": 277, "num_lines": 442, "path": "/RobotArm.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "import pygame # install in terminal with: pip install pygame\nimport sys\nimport random\n\n# RobotArm class ################################################\n#\n# An object of this class...\n#\n# lets you load and display a yard with stacks of colored boxes\n# you can load a predefined level at the creation\n# lets you program the movement of boxes and scan their colors\n# lets you inspect the yard for debugging purposes\n#\n# supported colors are: white, green, red, blue and yellow\n# \n# ######## methods for public use:\n# moveRight()\n# moves the robotarm one stack position to the right\n# returns True if succeeded, returns False if not possible\n#\n# moveLeft()\n# moves the robotarm one stack position to the left\n# returns True if succeeded, returns False if not possible\n#\n# grab()\n# lets the robotarm grab a box from the stack if there is one\n# returns True if succeeded, returns False if not possible\n#\n# drop()\n# lets the robotarm drop its box to the stack if not full\n# returns True if succeeded, returns False if not possible\n#\n# scan()\n# returns the color of the box at the robotarm\n#\n# wait(operator)\n# waits for the the program window to be closed\n# operator is an optional function with a parameter: events {list of events}\n# the operator must/can handle each event in events\n#\n# operate()\n# make the robotarm operate on keyboard-keys: LEFT, RIGHT and DOWN\n#\n# ######## creating and loading levels ########\n# \n# loadLevel(levelName)\n# loads a predefined level for levelName {string}\n# returns True if succeeded, returns False if failed\n# \n# loadMyLevel(yard, levelName) \n# loads a self made yard with a self made levelName {string}\n# where yard is a list of stacks each stack is a list of colors\n# box colors example of a yard: 
[['red','green'],['red','blue'],[],['green']]\n# returns True if succeeded, returns False if errors found, but sanitized\n#\n# randomLevel(stacks, layers)\n# loads a simple random level with stacks and layers\n#\n# loadRandomLevel(requirements )\n# loads a random level with optional requirements\n# requirements dictionary can contain key-values:\n# maxStacks {int}: number of random stacks to provide\n# minBoxes {int}: minmum number of boxes provided per stack\n# maxBoxes {int}: maximum number of boxes provided per stack\n# maxColors {int}: maximum number of colors provided in the yard\n# requiredColors {list of string}: list of required colors\n# levelName {string}: name of the level\n#\n# inspectYard()\n# prints the yard data, for inspection during debugging\n#\n# ###########################################################\n\nclass RobotArm:\n _colors = [\n {\"name\": 'white', 'code': (255,255,255)},\n {\"name\": 'red', 'code': (255,0,0)},\n {\"name\": 'green', 'code': (0,150,0)},\n {\"name\": 'blue', 'code': (0,0,255)},\n {\"name\": 'yellow', 'code': (255,255,0)}\n ]\n _defaultlevels = [\n {'name': 'exercise 1', 'yard' : [[],[\"red\"]]},\n {'name': 'exercise 2', 'yard' : [[\"blue\"],[],[],[],[\"blue\"],[],[],[\"blue\"]]},\n {'name': 'exercise 3', 'yard' : [[\"white\",\"white\",\"white\",\"white\"]]},\n {'name': 'exercise 4', 'yard' : [[\"blue\",\"white\", \"green\"]]},\n {'name': 'exercise 5', 'yard' : [[],[\"red\",\"red\",\"red\",\"red\",\"red\",\"red\",\"red\"]]},\n {'name': 'exercise 6', 'yard' : [[\"red\"],[\"blue\"],[\"white\"],[\"green\"],[\"green\"],[\"blue\"],[\"red\"],[\"white\"]]},\n {'name': 'exercise 7', 'yard' : [[],[\"blue\",\"blue\",\"blue\",\"blue\",\"blue\",\"blue\"], [],[\"blue\",\"blue\",\"blue\",\"blue\",\"blue\",\"blue\"], [],[\"blue\",\"blue\",\"blue\",\"blue\",\"blue\",\"blue\"], [],[\"blue\",\"blue\",\"blue\",\"blue\",\"blue\",\"blue\"],[],[\"blue\",\"blue\",\"blue\",\"blue\",\"blue\",\"blue\"]]},\n {'name': 'exercise 8', 'yard' : [[],[\"red\",\"red\",\"red\",\"red\",\"red\",\"red\",\"red\"]]},\n {'name': 'exercise 9', 'yard' : [[\"blue\"],[\"green\", \"green\"],[\"white\",\"white\",\"white\"],[\"red\",\"red\",\"red\",\"red\"]]},\n {'name': 'exercise 10', 'yard' : [[\"green\"],[\"blue\"],[\"white\"],[\"red\"],[\"blue\"]]},\n {'name': 'exercise 11', 'yard' : {'maxStacks': 9, 'minBoxes': 1, 'maxBoxes': 1, 'requiredColors': ['white'], 'maxColors': 4}},\n {'name': 'exercise 12', 'yard' : {'maxStacks': 9, 'minBoxes': 1, 'maxBoxes': 1, 'requiredColors': ['red'], 'maxColors': 4}},\n {'name': 'exercise 13', 'yard' : [[\"green\"],[\"green\"],[\"green\"],[\"blue\"],[\"white\"],[\"green\"],[\"red\"],[\"red\"],[\"blue\"],[\"green\"]]},\n {'name': 'exercise 14', 'yard' : [[],[\"green\"],[\"white\"],[\"green\",\"white\"],[\"red\",\"white\"],[\"white\",\"white\"],[\"blue\"],[\"blue\",\"blue\",\"blue\"],[\"blue\", \"green\", \"green\"],[\"red\"]]},\n {'name': 'exercise 15', 'yard' : [[],[\"blue\"],[],[\"blue\"],[\"white\"],[],[\"red\"],[\"green\"],[\"red\"],[\"green\"]]}\n ]\n _speeds = [{'fps': 100,'step': 1},{'fps': 150,'step': 2},{'fps': 250,'step': 4},{'fps': 400,'step': 5},{'fps': 500,'step': 10},{'fps': 500,'step': 20}]\n EMPTY = ''\n _backgroundColor = (200,200,200)\n _penColor = (0,0,0)\n _maxStacks = 10\n _maxLayers = 7\n _boxHeight = 29\n _boxWidth = 29\n _penWidth = 1\n _boxMargin = 2\n _armTopHeight = 15\n _bottomMargin = 2\n _idleAnimationTime = 300\n _screenMargin = 3\n _eventSleepTime = 300\n _eventActiveCycles = 100\n _iconImage = 'robotarm.ico'\n\n def 
__init__(self, levelName = ''):\n self._color = self.EMPTY\n self._stack = 0\n self._yardBottom = self._armTopHeight + (self._maxLayers + 1) * self._boxSpaceHeight() + self._penWidth\n self._armHeight = self._armTopHeight\n self._armX = 0\n self.speed = 1\n self._yard = []\n\n pygame.init()\n self._clock = pygame.time.Clock()\n \n self._screenWidth = self._stackX(self._maxStacks) + self._screenMargin\n self._screenHeight = self._layerY(-1) + self._bottomMargin + 2 * self._screenMargin\n self._screen = pygame.display.set_mode((self._screenWidth, self._screenHeight))\n\n try:\n programIcon = pygame.image.load(self._iconImage)\n pygame.display.set_icon(programIcon)\n except:\n print(self._iconImage + ' not found')\n\n # Load level at creation\n if levelName != '':\n self.loadLevel(levelName)\n\n########### ANIMATION METHODS ###########\n\n def _getColorCode(self, name):\n for c in self._colors:\n if c['name'] == name:\n return c['code']\n return False\n\n def _checkSpeed(self):\n speedInvalid = False\n if type(self.speed) is not int:\n speedInvalid = True\n if not (self.speed in range(len(self._speeds))):\n speedInvalid = True\n if speedInvalid:\n self.speed = 0 # reset speed to zero\n print('speed must be an integer between 0 and ' + str(len(self._speeds)-1))\n\n def _drawBoxAtPosition(self, x, y, color):\n pygame.draw.rect(self._screen, color, (x, y, self._boxWidth, self._boxHeight))\n pygame.draw.rect(self._screen, self._penColor, (x, y, self._boxWidth, self._boxHeight), self._penWidth)\n\n def _boxSpaceWidth(self):\n return (self._boxWidth + 2 * self._boxMargin) + self._penWidth\n\n def _stackX(self, stack):\n return self._screenMargin + self._boxMargin + stack * self._boxSpaceWidth() + self._penWidth\n\n def _boxSpaceHeight(self):\n return (self._boxHeight - self._penWidth)\n\n def _layerY(self,layer):\n return self._yardBottom - (layer + 1) * self._boxSpaceHeight() - self._screenMargin\n\n def _drawBox(self, stack, layer):\n x = self._stackX(stack) \n y = self._layerY(layer)\n color = self._getColorCode(self._yard[stack][layer])\n self._drawBoxAtPosition(x,y,color)\n\n def _drawStack(self, stack):\n for l in range(len(self._yard[stack])):\n self._drawBox(stack,l)\n x = self._stackX(stack) - self._boxMargin - self._penWidth\n y = self._layerY(-1) + self._bottomMargin\n\n pygame.draw.lines(self._screen, self._penColor, False, [(x, y - 5), (x, y), (x + self._boxSpaceWidth(), y), (x + self._boxSpaceWidth(), y - 5)])\n\n def _drawArm(self):\n xm = self._armX + int(self._boxSpaceWidth()/2) - self._boxMargin\n pygame.draw.line(self._screen, self._penColor, (xm, 2), (xm, self._armHeight - 2))\n pygame.draw.lines(self._screen, self._penColor, False, [\n (self._armX - self._boxMargin, self._armHeight + 2), \n (self._armX - self._boxMargin, self._armHeight - 2),\n (self._armX + self._boxWidth + self._penWidth, self._armHeight - 2),\n (self._armX + self._boxWidth + self._penWidth , self._armHeight + 2)])\n if self._color > '':\n self._drawBoxAtPosition(self._armX,self._armHeight,self._getColorCode(self._color))\n\n def _drawState(self):\n pygame.display.set_caption('Robotarm: ' + self._levelName)\n self._screen.fill(self._backgroundColor)\n for c in range(len(self._yard)):\n self._drawStack(c)\n self._drawArm()\n\n def _animate(self, *args):\n self._checkSpeed()\n self._armX = self._stackX(self._stack)\n\n if (args[0] == 'down'):\n self._armHeight = self._armTopHeight\n targetLayer = len(self._yard[self._stack])\n if self._color == '':\n targetLayer -= 1\n targetHeight = self._layerY(targetLayer)\n 
elif (args[0] == 'left'):\n targetX = self._stackX(self._stack - 1)\n elif (args[0] == 'right'):\n targetX = self._stackX(self._stack + 1)\n\n ready = False\n while not ready:\n if (args[0] == 'idle'):\n ready = True\n elif (args[0] == 'down'):\n ready = self._armHeight == targetHeight\n elif (args[0] == 'up'):\n ready = self._armHeight == self._armTopHeight\n elif (args[0] == 'left') or (args[0] == 'right'):\n ready = self._armX == targetX\n\n for event in pygame.event.get():\n self.checkCloseEvent(event)\n \n self._drawState()\n pygame.display.update()\n \n self._clock.tick(self._speeds[self.speed]['fps'])\n\n if (args[0] == 'down'):\n self._armHeight += self._speeds[self.speed]['step']\n if self._armHeight > targetHeight:\n self._armHeight = targetHeight\n elif (args[0] == 'up'):\n self._armHeight -= self._speeds[self.speed]['step']\n if self._armHeight < self._armTopHeight:\n self._armHeight = self._armTopHeight\n elif (args[0] == 'left'):\n self._armX -= self._speeds[self.speed]['step']\n if self._armX < targetX:\n self._armX = targetX\n elif (args[0] == 'right'):\n self._armX += self._speeds[self.speed]['step']\n if self._armX > targetX:\n self._armX = targetX\n elif (args[0] == 'idle'):\n pygame.time.delay(self._idleAnimationTime)\n \n ########### ROBOTARM MANIPULATION ###########\n \n def moveRight(self):\n success = False\n if self._stack < self._maxStacks - 1:\n self._animate('right')\n self._stack += 1\n success = True\n return success\n\n def moveLeft(self):\n success = False\n if self._stack > 0:\n self._animate('left')\n self._stack -= 1\n success = True\n return success\n\n def grab(self):\n success = False\n if self._color == self.EMPTY:\n self._animate('down')\n if len(self._yard[self._stack]) > 0:\n self._color = self._yard[self._stack][-1]\n self._yard[self._stack].pop(-1)\n success = True\n self._animate('up')\n return success\n\n def drop(self):\n success = False\n if self._color != self.EMPTY:\n if len(self._yard[self._stack]) < self._maxLayers:\n self._animate('down')\n self._yard[self._stack].append(self._color)\n self._color = self.EMPTY\n self._animate('up')\n success = True\n return success\n \n def scan(self):\n return self._color\n\n########### LEVEL & YARD lOADING & CREATION ###########\n\n def _checkYard(self,yard):\n success = True\n if type(yard) is not list:\n yard = []\n success = False\n for s in range(len(yard)):\n if type(yard[s]) is not list:\n yard[s] = []\n success = False\n for c in range(len(yard[s])):\n if self._getColorCode(yard[s][c]) == False:\n yard[s][c] = 'white'\n success = False\n return {'yard' : yard, 'success' : success}\n\n def loadMyLevel(self, yard, levelName = 'unknown level'):\n result = self._checkYard(yard)\n self._yard = result['yard'] # sanitized yard\n success = result['success'] # where there errors?\n while len(self._yard) < self._maxStacks:\n self._yard.append([])\n self._levelName = levelName\n self._animate('idle')\n \n return success\n \n def loadLevel(self, levelName):\n success = False\n for level in self._defaultlevels:\n if levelName == level['name']:\n if type(level['yard']) is dict:\n level['yard']['levelName'] = levelName\n self.loadRandomLevel(level['yard'])\n else:\n self.loadMyLevel(level['yard'], levelName)\n success = True\n if not success:\n self.loadMyLevel([])\n return success\n\n def _requiredColorsFound(self, yard, requiredColors):\n colors = []\n for stack in yard:\n for color in stack:\n colors.append(color)\n for color in requiredColors:\n if colors.count(color) == 0:\n return False\n return True\n\n 
def _createRandomYard(self, maxStacks, minBoxes, maxBoxes, colors, maxColors, requiredColors):\n yard = []\n while len(yard) == 0 or not self._requiredColorsFound(yard, requiredColors):\n yard = []\n for l in range(maxStacks):\n random.seed()\n stack = []\n height = random.randint(minBoxes, maxBoxes)\n for b in range(height):\n color = colors[random.randint(0,len(colors)-1)]\n stack.append(color)\n yard.append(stack)\n return yard \n\n def _randomColors(self, requiredColors, maxColors):\n colors = []\n for color in requiredColors:\n if not color in colors:\n colors.append(color)\n while len(colors) < maxColors:\n color = self._colors[random.randint(0,len(self._colors)-1)]['name']\n if not color in colors:\n colors.append(color)\n return colors\n \n def loadRandomLevel(self, requirements = {}):\n maxStacks = requirements['maxStacks'] if 'maxStacks' in requirements else 6\n maxStacks = self._maxStacks if maxStacks > self._maxStacks else maxStacks\n minBoxes = requirements['minBoxes'] if 'minBoxes' in requirements else 1\n maxBoxes = requirements['maxBoxes'] if 'maxBoxes' in requirements else 3\n maxBoxes = self._maxLayers if maxBoxes > self._maxLayers else maxBoxes\n requiredColors = requirements['requiredColors'] if 'requiredColors' in requirements else []\n levelName = requirements['levelName'] if 'levelName' in requirements else 'random level'\n maxColors = requirements['maxColors'] if 'maxColors' in requirements else 4\n\n colors = self._randomColors(requiredColors, maxColors)\n myYard = self._createRandomYard(maxStacks, minBoxes, maxBoxes, colors, maxColors, requiredColors)\n self.loadMyLevel(myYard, levelName)\n\n def randomLevel(self, stacks, layers):\n self.loadRandomLevel({'maxStacks': stacks, 'maxBoxes': layers})\n\n def inspectYard(self):\n print(self._yard)\n\n########### EVENT HANDLING ###########\n\n def checkCloseEvent(self,event):\n if event.type == pygame.QUIT:\n sys.exit() \n\n def _defaultHandler(self, events):\n for event in events: \n self.checkCloseEvent(event)\n\n def wait(self, handler = False):\n cycle = 0\n while True:\n events = pygame.event.get() # get latest events\n if callable(handler):\n handler(events)\n self._defaultHandler(events)\n if len(events) > 0: # events happened?\n cycle = 0 # stay awake and alert\n\n cycle += 1 # prepare for sleep\n\n if cycle > self._eventActiveCycles: # after 30 cycles \n pygame.time.delay(self._eventSleepTime) # go asleep for 300 milliseconds, give the processor some rest\n cycle = 0 # wake up for events during sleep\n\n def _operator(self, instructions):\n for instruction in instructions:\n if instruction.type == pygame.KEYDOWN:\n if instruction.key == pygame.K_LEFT:\n self.moveLeft()\n if instruction.key == pygame.K_RIGHT:\n self.moveRight()\n if instruction.key == pygame.K_DOWN:\n if self.scan() == '':\n self.grab()\n else:\n self.drop()\n\n def operate(self):\n self.wait(self._operator)\n" }, { "alpha_fraction": 0.6089030504226685, "alphanum_fraction": 0.6216216087341309, "avg_line_length": 25.25, "blob_id": "b53938a23fa3c34b6f6a471af08e57e4135edc2f", "content_id": "bf23225f707a0c8010d445aad5e482c911b09247", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 629, "license_type": "no_license", "max_line_length": 65, "num_lines": 24, "path": "/example12.py", "repo_name": "AkashRamlal1/robotarm", "src_encoding": "UTF-8", "text": "from RobotArm import RobotArm\n\nrobotArm = RobotArm('exercise 12')\n\n# Jouw python instructies zet je vanaf hier:\n\nkleur = \"red\"\n# Na jouw 
code wachten tot het sluiten van de window:\n\nfor blok in range(9):\n robotArm.moveRight();\nfor blok in range(9): # hoeveel blokken zijn er? geen 27!\n robotArm.grab()\n kleur = robotArm.scan()\n print(kleur)\n if kleur == \"red\":\n for i in range(blok + 2): # gebruik hier niet de var blok\n robotArm.moveRight();\n robotArm.drop()\n for i in range(blok + 3): \n robotArm.moveLeft()\n else:\n robotArm.drop()\n robotArm.moveLeft();" } ]
10
SoundNandu/May-LeetCode-Challenge-2020
https://github.com/SoundNandu/May-LeetCode-Challenge-2020
442e9eec344356a1c6b404939dd92b006bb2feba
73ee6c8851cebedce1c72ecec08aeb274f60da6e
3d35266b81eee21f7b90779b746489789640db22
refs/heads/master
2022-06-02T23:52:08.342247
2020-05-04T18:04:38
2020-05-04T18:04:38
260,564,972
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5757575631141663, "alphanum_fraction": 0.6060606241226196, "avg_line_length": 21, "blob_id": "f3662b6ca3dcd93de58c580c0a0eb21706d84870", "content_id": "05f7d4bc7f2b1f9c7cb4a82038df7771d0960658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 132, "license_type": "no_license", "max_line_length": 29, "num_lines": 6, "path": "/README.md", "repo_name": "SoundNandu/May-LeetCode-Challenge-2020", "src_encoding": "UTF-8", "text": "# May-LeetCode-Challenge\n\n##### 1. First Bad Version \n##### 2.Jewels and Stones \n##### 3.Ransom Note\n##### 4.NumberComplement\n" }, { "alpha_fraction": 0.6045258641242981, "alphanum_fraction": 0.610991358757019, "avg_line_length": 25.794116973876953, "blob_id": "b61589148e1da4676631a8681073d6e2ef1d6db0", "content_id": "9856453b09ab447f66e13110f522167ce8a3e610", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 928, "license_type": "no_license", "max_line_length": 229, "num_lines": 34, "path": "/JewelsandStones.py", "repo_name": "SoundNandu/May-LeetCode-Challenge-2020", "src_encoding": "UTF-8", "text": "Jewels and Stones\n\n\nYou're given strings J representing the types of stones that are jewels, and S representing the stones you have. Each character in S is a type of stone you have. You want to know how many of the stones you have are also jewels.\n\nThe letters in J are guaranteed distinct, and all characters in J and S are letters. Letters are case sensitive, so \"a\" is considered a different type of stone from \"A\".\n\nExample 1:\n\nInput: J = \"aA\", S = \"aAAbbbb\"\nOutput: 3\n\nExample 2:\n\nInput: J = \"z\", S = \"ZZ\"\nOutput: 0\n\nTime : O(n) Space : O(n)\n\n\nclass Solution(object):\n def numJewelsInStones(self, J, S):\n \"\"\"\n :type J: str\n :type S: str\n :rtype: int\n \"\"\"\n jewels = set (J)\n count = 0\n #check wther stones in jewels if \"Yes\" increment count\n for stones in S:\n if stones in jewels:\n count +=1\n return count\n \n" }, { "alpha_fraction": 0.6364985108375549, "alphanum_fraction": 0.6691394448280334, "avg_line_length": 20.0625, "blob_id": "22472db63db70136bd6935371261630f9bbaae2e", "content_id": "7dec03abd00e27ef5435a640ec24ca30db7696a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 674, "license_type": "no_license", "max_line_length": 129, "num_lines": 32, "path": "/NumberComplement.py", "repo_name": "SoundNandu/May-LeetCode-Challenge-2020", "src_encoding": "UTF-8", "text": "Number Complement\n\n\nGiven a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.\n\n \n\nExample 1:\n\nInput: 5\nOutput: 2\nExplanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.\n\n \n\nExample 2:\n\nInput: 1\nOutput: 0\nExplanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.\n\n\nTime : O(1) | Space : O(1)\n\n#flip bit by bit\nclass Solution(object):\n def findComplement(self, num):\n i = 1\n while num >= i:\n num ^= i\n i <<= 1\n return num\n" } ]
3
yucheng0302/angularjs
https://github.com/yucheng0302/angularjs
e7e5f53eb623960a2e6367322854377f0482aee1
55fca1f72117fbb28acce41afba792c5bfb7f1b3
65083b3c767c14adddaa18f11b7f1af3ee8fa821
refs/heads/master
2021-01-18T23:22:08.423171
2016-05-13T23:44:50
2016-05-13T23:44:50
39,613,434
0
0
null
2015-07-24T05:27:28
2016-05-10T12:21:50
2016-05-10T12:34:33
Python
[ { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 39, "blob_id": "90315453b4e60b132929d7fd9d467d9d46917826", "content_id": "5a1b3f014605e3f9781cbeef16a7c7dff7f6eb34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 39, "num_lines": 1, "path": "/hello.py", "repo_name": "yucheng0302/angularjs", "src_encoding": "UTF-8", "text": "print \"This line should NOT be printed\"\n" } ]
1
mennaML/IFT6135H19_assignment3
https://github.com/mennaML/IFT6135H19_assignment3
c7047147c36db343566201ca9630a95c05c20ec4
14e37e30eff9f65e96fec10d44d820d0bbe7bd8d
ceb46691369d651c498311e05bdf11570dfd67be
refs/heads/master
2020-05-09T11:34:28.844748
2019-04-23T17:46:16
2019-04-23T17:46:16
181,085,346
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5960469245910645, "alphanum_fraction": 0.6065472364425659, "avg_line_length": 22.478260040283203, "blob_id": "6ff4c6c232bdf0e78646953be8ff7962927c5141", "content_id": "5e5ac91b897438c4da650afc91e9fe1c15c8d226", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1619, "license_type": "no_license", "max_line_length": 80, "num_lines": 69, "path": "/Question 2/dataset.py", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "from torch.utils.data import Dataset\nimport os\nimport torch\nfrom PIL import Image\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\nclass Binary_MNIST_DS(Dataset):\n \"\"\" Binary MNIST Dataset.\n \"\"\"\n\n def __init__(self, path_file, transform=None):\n\n self.transform = transform\n\n #self.train = train # training set or test set\n\n if not os.path.exists(path_file):\n raise RuntimeError('Dataset not found @ {}!'.format(self.path_file))\n\n self.data = np.loadtxt(path_file)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n tuple: (image, target) where target is index of the target class.\n \"\"\"\n img = self.data[index]\n\n img = img.reshape((28, 28)).astype('uint8')*255\n\n img = Image.fromarray(img)\n\n if self.transform is not None:\n img = self.transform(img)\n\n return img\n\n def __len__(self):\n return len(self.data)\n\n\n\n\n#dataset = Binary_MNIST_DS('data/binarized_mnist_train.amat', transform=None)\n#plt.imshow(dataset[0])\n#plt.show()\n\n'''\ndata = np.loadtxt('data/pr_mean.csv', delimiter=',')\nplt.plot(data[:,1], data[:,2], label='PR Mean')\ndata = np.loadtxt('data/rt_mean.csv', delimiter=',')\nplt.plot(data[:,1], data[:,2], label='RT Mean')\ndata = np.loadtxt('data/rr_stdev.csv', delimiter=',')\nplt.plot(data[:,1], data[:,2], label='RR Std Dev.')\ndata = np.loadtxt('data/user_id.csv', delimiter=',')\nplt.plot(data[:,1], data[:,2], label='User ID')\n\nplt.ylabel('Metric value')\nplt.xlabel('Epochs')\n\nplt.legend(loc='lowe right')\n\nplt.show()\n'''" }, { "alpha_fraction": 0.5600000023841858, "alphanum_fraction": 0.653333306312561, "avg_line_length": 14.199999809265137, "blob_id": "f0a0d073e8beec6d77db6e3dd2bbb402cea6540d", "content_id": "614ffe476d1d01280ca2717bc69d09193a80f6f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 75, "license_type": "no_license", "max_line_length": 37, "num_lines": 5, "path": "/README.md", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "# IFT6135H19_assignment3\n\nVAE vs GAN\n\nTeam - Akila Jeeson Daniel, Myriam Laiymani, Marc-André Piché, Khalil Slimi" }, { "alpha_fraction": 0.5429864525794983, "alphanum_fraction": 0.5506787300109863, "avg_line_length": 35.83333206176758, "blob_id": "427a68b898fe7a620b461f29dfbe85079cc1fe08", "content_id": "4105c2b8bf032535cf5b1bfbcebe741c79dfa7e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2210, "license_type": "no_license", "max_line_length": 87, "num_lines": 60, "path": "/Question 2/importance_sampling.py", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "import numpy as np\nimport torch\nimport torch.distributions as tdist\nfrom torch.nn import functional as F\n\n# Generates z_ik, log_p_zik and log_q_zik\ndef generate_samples(model, x_batch, num_samples, device):\n\n mean_batch, logvar_batch = model.encode(x_batch)\n \n mv_normal = 
tdist.multivariate_normal.MultivariateNormal\n \n p_z_dist = mv_normal(torch.zeros(mean_batch.size(1)).to(device), \n torch.eye(mean_batch.size(1)).to(device))\n \n z_samples = torch.empty((mean_batch.size(0), \n num_samples, mean_batch.size(1)), device=device)\n log_q_z = torch.empty((mean_batch.size(0), num_samples), device=device)\n log_p_z = torch.empty((mean_batch.size(0), num_samples), device=device)\n \n for i in range(len(x_batch)):\n q_z_dist = mv_normal(mean_batch[i], \n torch.diag(torch.exp(0.5*logvar_batch[i])))\n z_i = q_z_dist.sample((num_samples,))\n log_q_z_i = q_z_dist.log_prob(z_i)\n log_p_z_i = p_z_dist.log_prob(z_i)\n \n z_samples[i] = z_i\n log_q_z[i] = log_q_z_i\n log_p_z[i] = log_p_z_i\n \n\n return z_samples, log_p_z, log_q_z\n\n# Calculates the importance sampling approximation of log_p_x over a batch\ndef estimate_batch_log_density(model, x_batch, num_samples, device):\n\n z_samples, log_p_z, log_q_z = generate_samples(model, x_batch, num_samples, device)\n\n result = torch.empty((len(x_batch), ), device=device)\n for i in range(len(x_batch)):\n\n x_predict = model.decode(z_samples[i])\n log_p_z_i = log_p_z[i]\n log_q_z_i = log_q_z[i]\n \n log_p_x_z_i = torch.empty((num_samples, ), device=device)\n \n for k in range(num_samples):\n log_p_x_z_ik = -F.binary_cross_entropy(x_predict[k].view(-1, 784), \n x_batch[i].view(-1, 784), \n reduction='sum') \n log_p_x_z_i[k] = log_p_x_z_ik.item()\n \n logsum = log_p_x_z_i + log_p_z_i - log_q_z_i\n \n logpx = -np.log(num_samples) + torch.logsumexp(logsum, 0)\n result[i] = logpx\n\n return result\n" }, { "alpha_fraction": 0.6258783340454102, "alphanum_fraction": 0.6384783387184143, "avg_line_length": 30.25757598876953, "blob_id": "37370e9bee780714bb0c70287d273a5baac147f5", "content_id": "d518737059c65b41e2f584eadb119c2619ed508b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4127, "license_type": "no_license", "max_line_length": 111, "num_lines": 132, "path": "/Question 2/VAE_trainer.py", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "import torch\nfrom dataset import Binary_MNIST_DS\nfrom VAE_model import VAE\nfrom torch.utils.data import DataLoader\nfrom torch import nn, optim\nfrom torch.nn import functional as F\nfrom torchvision import datasets, transforms\nfrom tqdm import tqdm\nimport random\nimport os\n\nimport numpy as np\nimport importance_sampling as sampler\n\n\n\nnum_epochs = 20\nbatch_size = 32\nlearning_rate = 3*(10**-4)\n \n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nkwargs = {'num_workers': 4, 'pin_memory': True} if torch.cuda.is_available() else {}\n\ntrain_dataset = Binary_MNIST_DS('data/binarized_mnist_train.amat', transform=transforms.ToTensor())\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)\n\nvalid_dataset = Binary_MNIST_DS('data/binarized_mnist_valid.amat', transform=transforms.ToTensor())\nvalid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\ntest_dataset = Binary_MNIST_DS('data/binarized_mnist_test.amat', transform=transforms.ToTensor())\ntest_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)\n\n\ndef fix_seed(seed):\n '''\n Fix the seed.\n\n Parameters\n ----------\n seed: int\n The seed to use.\n\n '''\n print('pytorch/random seed: {}'.format(seed))\n np.random.seed(seed)\n random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = 
True\n\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n# Reconstruction + KL divergence \ndef loss_function(recon_x, x, mu, logvar):\n \n #loss = - ELBO = BCE + KLD\n \n BCE = F.binary_cross_entropy(recon_x.view(-1, 784), x.view(-1, 784), reduction='sum')\n\n # See https://arxiv.org/abs/1312.6114 - Appendix B\n # -KLD = 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n return BCE + KLD\n\n\ndef train(model, optimizer, epoch):\n model.train()\n train_loss = 0\n for batch_idx, data in enumerate(tqdm(train_loader)):\n data = data.to(device)\n optimizer.zero_grad()\n reconstructed_batch, mu, logvar = model(data)\n loss = loss_function(reconstructed_batch, data, mu, logvar)\n loss.backward()\n train_loss += loss.item()\n optimizer.step()\n\n print('====> Epoch: {} Average loss: {:.4f}'.format(\n epoch, train_loss / len(train_loader.dataset)))\n \n\ndef evaluate_elbo(model, dataloader, epoch=0, verbose=False):\n\n model.eval()\n test_loss = 0\n with torch.no_grad():\n for i, data in enumerate(tqdm(dataloader)):\n data = data.to(device)\n reconstructed_batch, mu, logvar = model(data)\n l = loss_function(reconstructed_batch, data, mu, logvar).item()\n #print('loss_function', l)\n test_loss += l\n test_loss /= len(dataloader.dataset)\n elbo = -test_loss\n if verbose:\n print('====> Epoch: {} Average ELBO: {:.4f}'.format(epoch, elbo))\n \n return -test_loss\n\ndef estimate_log_density(model, data_loader, num_samples):\n \n model.eval()\n batches = []\n with torch.no_grad():\n for i, batch in enumerate(tqdm(data_loader)):\n batch = batch.to(device)\n batches.append(sampler.estimate_batch_log_density(model, batch, num_samples, device).cpu().numpy())\n all_log_p_x = np.concatenate(batches)\n return np.mean(all_log_p_x)\n\nif __name__ == \"__main__\":\n \n fix_seed(1)\n \n model = VAE().to(device)\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n \n for epoch in range(1, num_epochs + 1):\n train(model, optimizer, epoch)\n evaluate_elbo(model, valid_loader, epoch, verbose=True)\n\n logpx_valid = estimate_log_density(model, valid_loader, 200)\n print(\"====> Validation set log p(x) approximation: %.2f\" % logpx_valid)\n \n elbo_test = evaluate_elbo(model, test_loader)\n print('====> Test set ELBO: {:.4f}'.format(elbo_test))\n \n logpx_test = estimate_log_density(model, test_loader, 200)\n print(\"====> Test set log p(x) approximation: %.2f\" % logpx_test)\n\n" }, { "alpha_fraction": 0.5047438144683838, "alphanum_fraction": 0.5545541048049927, "avg_line_length": 29.114286422729492, "blob_id": "4b1dd1502877b4c1a720fc57866214b1925e78fc", "content_id": "90941fad18267a07dd1af4a80d0e96ecf3e5a868", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2108, "license_type": "no_license", "max_line_length": 86, "num_lines": 70, "path": "/Question 2/VAE_model.py", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nclass VAE(nn.Module):\n def __init__(self):\n super(VAE, self).__init__()\n \n self.conv1 = nn.Conv2d(1, 32, kernel_size=3)\n self.conv2 = nn.Conv2d(32, 64, kernel_size=3)\n self.conv3 = nn.Conv2d(64, 256, kernel_size=5)\n self.conv4 = nn.Conv2d(256, 64, kernel_size=5, padding=4)\n self.conv5 = nn.Conv2d(64, 32, kernel_size=3, padding=2)\n self.conv6 = nn.Conv2d(32, 16, kernel_size=3, padding=2)\n self.conv7 = 
nn.Conv2d(16, 1, kernel_size=3, padding=2)\n \n self.ELU = nn.ELU()\n self.AvgPool2d = nn.AvgPool2d(kernel_size=2, stride=2)\n self.enc_fc_mu = nn.Linear(256, 100)\n self.enc_fc_logvar = nn.Linear(256, 100)\n self.dec_fc = nn.Linear(100, 256)\n\n self.upsamplingx2 = F.interpolate#nn.UpsamplingBilinear2d(scale_factor=2)\n\n def encode(self, x):\n x = self.conv1(x)\n x = self.ELU(x)\n x = self.AvgPool2d(x)\n x = self.conv2(x)\n x = self.ELU(x)\n x = self.AvgPool2d(x)\n x = self.conv3(x)\n x = self.ELU(x)\n return self.enc_fc_mu(x.squeeze()), self.enc_fc_logvar(x.squeeze())\n\n def reparametrize(self, mu, logvar):\n std = torch.exp(0.5*logvar)\n eps = torch.randn_like(std)\n return mu + eps*std \n\n def decode(self, z):\n\n # z 100x1\n x = self.dec_fc(z)\n x = self.ELU(x)\n \n x = x.view(x.size(0), x.size(1), 1, 1)\n \n x = self.conv4(x)\n x = self.ELU(x)\n\n x = self.upsamplingx2(x, scale_factor=2, mode='bilinear', align_corners=False)\n\n x = self.conv5(x)\n x = self.ELU(x)\n\n x = self.upsamplingx2(x, scale_factor=2, mode='bilinear', align_corners=False)\n\n x = self.conv6(x)\n x = self.ELU(x)\n\n x = self.conv7(x)\n # x 28x28\n return torch.sigmoid(x)\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparametrize(mu, logvar)\n return self.decode(z), mu, logvar\n" }, { "alpha_fraction": 0.5202035903930664, "alphanum_fraction": 0.5416409373283386, "avg_line_length": 28.74770736694336, "blob_id": "79e624ddc81d83961ee020619f16fef531a9038a", "content_id": "7652c7123881a820479208cd097e135794a51f4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6484, "license_type": "no_license", "max_line_length": 99, "num_lines": 218, "path": "/Question 1/GANQ1.py", "repo_name": "mennaML/IFT6135H19_assignment3", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport samplers as sampler\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\ndef KL(p, q):\n if np.sum(p)!=1 or np.sum(q)!=1 :\n raise Exception(\"input in not a valid pdf\")\n\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n\n\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))\n\ndef JS(p, q):\n return 0.5*(KL(p, (p+q)/2) + KL(q, (p+q)/2))\n\ndef JSD_loss_function_q4(Dx, Dy):\n return -1*(torch.mean(torch.log(Dx)) + torch.mean(torch.log(1 - Dy)))\n\ndef JSD_loss_function(Dx, Dy):\n return -1*(torch.mean(torch.log(Dx)) + torch.mean(torch.log(1 - Dy)))\n\ndef WD_loss_function(Tx, Ty, gradient_penalty=True):\n return -1*(torch.mean(Tx) - torch.mean(Ty))\n\n###############################################################################\n#\n# Discriminator Class\n#\n###############################################################################\n\nclass Discriminator(nn.Module):\n def __init__(self, input_size, hidden_size):\n super(Discriminator, self).__init__()\n self.layers = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(hidden_size, hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(hidden_size, hidden_size//2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(hidden_size//2, 1),\n nn.Sigmoid(), \n )\n self.weights_init()\n def weights_init(m):\n if isinstance(m, nn.Linear):\n torch.nn.init.xavier_uniform(m.weight.data)\n torch.nn.init.xavier_uniform(m.bias.data)\n \n def forward(self, x):\n x 
= self.layers(x)\n return x\n\n\n\ndef train(model, loss_fn, num_epochs, batch_size, phi, lmdba=10):\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n p_gen = sampler.distribution1(0, batch_size=batch_size)\n q_generator = sampler.distribution1(phi, batch_size=batch_size)\n a_generator = sampler.distribution2(batch_size=batch_size)\n\n losses = []\n for epoch in range(num_epochs):\n model.train()\n optimizer.zero_grad()\n \n x = torch.tensor(next(p_gen), dtype=torch.float)\n y = torch.tensor(next(q_generator), dtype=torch.float)\n \n x.to(device)\n y.to(device)\n\n Dx = model(Variable(x))\n Dy = model(Variable(y))\n loss = loss_fn(Dx, Dy)\n\n if loss_fn == WD_loss_function:\n #sample a and compute z\n a = torch.tensor(next(a_generator), dtype=torch.float) \n z = a*x + (1-a)*y\n \n z.requires_grad_(True)\n Dz = model(z)\n Dz.requires_grad_(True)\n loss.backward(retain_graph=True, create_graph=True)\n\n grad_Tz = torch.autograd.grad(Dz.sum(), z, create_graph=True)[0]\n norm_gradient = torch.norm(grad_Tz, p=2, dim=-1)\n\n penalty = (norm_gradient - 1).pow(2).mean()\n loss += lmdba*penalty\n\n #loss.backward(retain_graph=True, create_graph=True)\n \n \n loss.backward()\n\n losses.append(loss)\n optimizer.step()\n \n if( epoch % int(num_epochs/10)) == (int(num_epochs/10)-1) :\n print( \"Epoch %6d. Loss %5.3f\" % ( epoch+1, loss ) )\n \n print( \"Training complete for discriminator with parameter phi=\", phi)\n return model\n\ndef eval(model, batch_size, phi):\n q_generator = sampler.distribution1(phi, batch_size=batch_size)\n\n model.eval()\n q = torch.tensor(next(q_generator), dtype=torch.float)\n\n return model(q)\n\n\ndef train_q4(model, loss_fn, num_epochs, batch_size, f0_samples, f1_samples):\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\n p_gen = sampler.distribution4(batch_size=batch_size)\n q_gen = sampler.distribution3(batch_size=batch_size)\n\n losses = []\n for epoch in range(num_epochs):\n model.train()\n optimizer.zero_grad()\n\n x = torch.tensor(next(p_gen), dtype=torch.float)\n y = torch.tensor(next(q_gen), dtype=torch.float)\n\n x.to(device)\n y.to(device)\n\n Dx = model(Variable(x))\n Dy = model(Variable(y))\n loss = loss_fn(Dx, Dy)\n\n loss.backward()\n\n losses.append(loss)\n optimizer.step()\n\n if (epoch % int(num_epochs / 10)) == (int(num_epochs / 10) - 1):\n print(\"Epoch %6d. 
Loss %5.3f\" % (epoch + 1, loss))\n\n print(\"Training complete for discriminator (Q1.4)\",)\n return model\n\n\ndef question_1_3_JSD():\n phi = np.arange(-1, 1.1, step=0.1)\n eval_size = 100\n uniformsJSD = []\n Dy = torch.zeros((eval_size, 21))\n # training\n for i in range(21):\n D = Discriminator(input_size=2, hidden_size=64)\n D = train(model=D, loss_fn=JSD_loss_function, num_epochs=10000, batch_size=512, phi=phi[i])\n Dy[:, i:i + 1] = eval(D, batch_size=eval_size, phi=phi[i])\n\n # computing the loss\n for i in range(21):\n uniformsJSD.append(np.log(2) - 0.5 * JSD_loss_function(Dy[:, 10], Dy[:, i]))\n\n plt.plot(phi, uniformsJSD)\n plt.xlabel('Phi')\n plt.ylabel('JSD')\n plt.show()\n\n\ndef question_1_3_WSD():\n phi = np.arange(-1, 1.1, step=0.1)\n uniformsWD = []\n eval_size = 100\n Dy = torch.zeros((eval_size, 21))\n\n # training\n for i in range(21):\n D = Discriminator(input_size=2, hidden_size=64)\n D.to(device)\n D = train(model=D, loss_fn=WD_loss_function, num_epochs=6000, batch_size=512, phi=phi[i])\n Dy[:, i:i + 1] = eval(D, batch_size=eval_size, phi=phi[i])\n\n # computing the loss\n for i in range(21):\n uniformsWD.append(-WD_loss_function(Dy[:, 10], Dy[:, i]))\n\n np.save('WSD_plt_data.npy', uniformsWD)\n\n plt.plot(phi, uniformsWD)\n plt.xlabel('Phi')\n plt.ylabel('WD')\n plt.show()\n\n###############################################################################\n#\n# Q1.3: training discriminator on JSD and WD loss\n#\n###############################################################################\n\nif __name__ == '__main__':\n\n question_1_3_JSD()\n question_1_3_WSD()\n\n\n###############################################################################\n#\n# Q1.4: Implemented in density_estimation.py\n#\n###############################################################################" } ]
6
williamegomezo/coursera-downloader
https://github.com/williamegomezo/coursera-downloader
a1219406c98de28b3d57e8b8fca1ae87bfda45c1
d783ab38e28411c26ea85a1bdf040cd48e6fa53d
2e3b7d2d4646beee5755c83759ccf1695db3eac8
refs/heads/master
2020-04-25T01:33:07.993642
2019-02-27T04:06:52
2019-02-27T04:06:52
172,411,593
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6739130616188049, "avg_line_length": 26.700000762939453, "blob_id": "47a354470929fda4bf20934019917ecd72d16b82", "content_id": "fe24428b4033b0e51bfec2aff6e04bbcbcdca584", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 70, "num_lines": 10, "path": "/page_utils.py", "repo_name": "williamegomezo/coursera-downloader", "src_encoding": "UTF-8", "text": "import time\n\ndef wait_for(condition_function, timeout):\n start_time = time.time()\n while time.time() < start_time + timeout:\n if condition_function():\n return True\n else:\n time.sleep(0.1)\n raise Exception('Timeout waiting for' + condition_function.__name__)" }, { "alpha_fraction": 0.5529939532279968, "alphanum_fraction": 0.5554975867271423, "avg_line_length": 32.63508605957031, "blob_id": "6af5a49d2e2ef7f1675b8cf325e1a5b328d2c79f", "content_id": "753545451b8860c569df603fa83c1deee30ed27e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9586, "license_type": "no_license", "max_line_length": 109, "num_lines": 285, "path": "/scrapper.py", "repo_name": "williamegomezo/coursera-downloader", "src_encoding": "UTF-8", "text": "from requests import get\nfrom bs4 import BeautifulSoup\nfrom contextlib import closing\nfrom requests.exceptions import RequestException\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport json\nfrom os import mkdir, path, getenv\nfrom dotenv import load_dotenv\nimport wget\nimport ssl\nfrom page_utils import wait_for\nfrom urllib.parse import parse_qs\n\nload_dotenv()\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None.\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns True if the response seems to be HTML, False otherwise.\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef log_error(e):\n \"\"\"\n It is always a good idea to log errors.\n This function just prints them, but you can\n make it do anything.\n \"\"\"\n print(e)\n\n\nclass SeleniumCoursera:\n def __init__(self, driver, subdriver, timeout):\n self.driver = driver\n self.subdriver = subdriver\n self.timeout = timeout\n self.old_current_id = 0\n self.old_subcurrent_id = 0\n\n self.coursera_base = 'https://www.coursera.org/learn/'\n\n def driver_change(self, new_path):\n self.driver.get(new_path)\n wait_for(self.driver_loaded, self.timeout)\n self.old_current_id = self.current_id\n print('Main', new_path, self.current_id)\n\n def subdriver_change(self, new_path):\n self.subdriver.get(new_path)\n wait_for(self.subdriver_loaded, self.timeout)\n self.old_subcurrent_id = self.subcurrent_id\n print('Sub', new_path, self.subcurrent_id)\n\n def driver_loaded(self):\n self.current_id = self.driver.find_element_by_tag_name('html').id\n return self.old_current_id != self.current_id\n\n def subdriver_loaded(self):\n self.subcurrent_id = 
self.subdriver.find_element_by_tag_name('html').id\n return self.old_subcurrent_id != self.subcurrent_id\n\n def week_loaded(self):\n try:\n self.driver.find_element_by_class_name('rc-NamedItemList')\n if self.driver.current_url.split('/')[-1] == str(self.week):\n return True\n else:\n return False\n except:\n return False\n\n def video_loaded(self):\n try:\n self.subdriver.find_element_by_id('c-video_html5_api')\n return True\n except:\n return False\n\n def login(self, email, password):\n if path.isfile('temp/cookies.json'):\n cookies = []\n self.driver_change('https://www.coursera.org')\n with open('temp/cookies.json') as json_file:\n self.cookies = json.load(json_file)\n\n for cookie in self.cookies:\n self.driver.add_cookie(cookie)\n\n self.set_sub_driver()\n\n return True\n\n self.driver_change('https://www.coursera.org/?authMode=login')\n inputs = self.driver.find_elements_by_tag_name('input')\n buttons = self.driver.find_elements_by_tag_name('button')\n\n input_email = self.find_by_attribute(inputs, 'type', 'email')\n input_email.send_keys(email)\n\n input_password = self.find_by_attribute(inputs, 'type', 'password')\n input_password.send_keys(password)\n\n button_login = self.find_by_attribute(\n buttons, 'data-courselenium', 'login-form-submit-button')\n button_login.click()\n\n input(\"Confirm that you are not a robot in the driver. Once logged in, Press Enter to continue...\")\n cookies_list = self.driver.get_cookies()\n\n try:\n mkdir('temp')\n except:\n pass\n\n with open('temp/cookies.json', 'w') as file:\n json.dump(cookies_list, file)\n\n self.set_sub_driver()\n\n def set_sub_driver(self):\n self.subdriver_change('https://www.coursera.org')\n with open('temp/cookies.json') as json_file:\n self.cookies = json.load(json_file)\n\n for cookie in self.cookies:\n self.subdriver.add_cookie(cookie)\n self.subdriver_change('https://www.coursera.org')\n\n @staticmethod\n def find_by_attribute(list_elements, attribute, value):\n for element in list_elements:\n if element.get_attribute(attribute) == value:\n return element\n return None\n\n def download_courses(self, courses):\n for course in courses:\n self.download_course(course)\n\n def download_course(self, course):\n self.create_folder('downloads')\n has_week = True\n week = 1\n self.week = week\n\n week_base = '/home/week/'\n while(has_week):\n self.driver.get(self.coursera_base +\n course + week_base + str(week))\n try:\n wait_for(self.week_loaded, self.timeout)\n except:\n has_week = False\n break\n\n print('Week page loaded')\n\n titles = self.driver.find_elements_by_class_name(\n 'rc-NamedItemList')\n\n if len(titles) == 0:\n has_week = False\n break\n\n for i, title in enumerate(titles):\n title_text = title.find_element_by_tag_name('h3').text\n folder_name = str(i) + '_' + self.format_name(title_text)\n print(folder_name)\n items = title.find_elements_by_tag_name('li')\n for j, item in enumerate(items):\n try:\n item_type = item.find_element_by_tag_name(\n 'strong').text\n if \"Video:\" in item_type:\n name = item.find_element_by_class_name(\n 'rc-WeekItemName').text\n href = item.find_element_by_tag_name(\n 'a').get_attribute('href')\n name = str(j) + '_' + self.format_name(name)\n self.download_video(course, str(\n week), folder_name, name, href)\n except:\n pass\n week += 1\n self.week += 1\n\n @staticmethod\n def format_name(name):\n return name.replace(':', '_').replace('?', '').replace('\\n', '_').replace('-', '_').replace('/', '_')\n\n def download_video(self, course, week, folder_name, video, href):\n 
self.subdriver.get(href)\n try:\n wait_for(self.video_loaded, self.timeout)\n except:\n return False\n video_links = []\n subtitles_links = []\n try:\n video_element = self.subdriver.find_element_by_id(\n 'c-video_html5_api')\n video_links = video_element.find_elements_by_tag_name('source')\n except:\n pass\n try:\n video_element = self.subdriver.find_element_by_id(\n 'c-video_html5_api')\n subtitles_links = video_element.find_elements_by_tag_name('track')\n except:\n pass\n\n if len(video_links) > 0:\n for link in video_links:\n extension = link.get_attribute('type').split('/')[-1]\n href = link.get_attribute('src')\n self.save_resource(course, week, folder_name,\n video + '.' + extension, href)\n if len(subtitles_links) > 0:\n for link in subtitles_links:\n qs = parse_qs(link.get_attribute('src'))\n extension = qs['fileExtension'][0]\n language = link.get_attribute('label')\n href = link.get_attribute('src')\n self.save_resource(course, week, folder_name,\n video + '_' + language + '.' + extension, href)\n\n def save_resource(self, course, week, folder_name, resource, link):\n self.create_folder('downloads/' + course)\n self.create_folder('downloads/' + course + '/' + week)\n self.create_folder('downloads/' + course + '/' +\n week + '/' + folder_name)\n\n ssl._create_default_https_context = ssl._create_unverified_context\n print('Downloading: ', 'downloads/' + course + '/' + week +\n '/' + folder_name + '/' + resource)\n print('')\n wget.download(link, 'downloads/' + course +\n '/' + week + '/' + folder_name + '/' + resource)\n print('')\n\n @staticmethod\n def create_folder(folder):\n try:\n mkdir(folder)\n except:\n pass\n # print('Folder: ' + folder + '. Already created.')\n\n\nwith open('courses.json') as json_file:\n courses = json.load(json_file)\n\ndriver = webdriver.Chrome(\"./chromedriver\")\nsubdriver = webdriver.Chrome(\"./chromedriver\")\ncoursera = SeleniumCoursera(driver, subdriver, timeout=60)\n\nEMAIL = getenv(\"EMAIL\")\nPASSWORD = getenv(\"PASSWORD\")\ncoursera.login(EMAIL, PASSWORD)\n\ncoursera.download_courses(courses)\n" }, { "alpha_fraction": 0.5056179761886597, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 16.799999237060547, "blob_id": "17c4ded1285f516e0826ab39d03a91e73d0cab4b", "content_id": "44099865a94121e7036028f247e5c641060b3dd6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 89, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/requirements.txt", "repo_name": "williamegomezo/coursera-downloader", "src_encoding": "UTF-8", "text": "selenium==3.141.0\nrequests==2.21.0\nbeautifulsoup4==4.7.1\npython-dotenv==0.10.1\nwget==3.2\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 29, "blob_id": "fb2403c07ee53f559a63cfcdbdb1b1232aba6c98", "content_id": "13b801065990f8bd92deb7f455bf1fa04a580c6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "no_license", "max_line_length": 37, "num_lines": 2, "path": "/README.md", "repo_name": "williamegomezo/coursera-downloader", "src_encoding": "UTF-8", "text": "# coursera-downloader\nScrapper to download coursera course.\n" } ]
4
Walesby/MachineLearning-Lab4
https://github.com/Walesby/MachineLearning-Lab4
0afc6e674b3d829fc06eadbfda01a8fca0f3f213
389b2fa86724c175def1d4734880cc76420c8b55
a628cc381d8dc2ebf68c735bda095ff1096f244c
refs/heads/master
2022-03-13T20:04:42.549166
2019-12-16T21:28:58
2019-12-16T21:28:58
228,478,830
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6204732060432434, "alphanum_fraction": 0.6617575883865356, "avg_line_length": 39.838382720947266, "blob_id": "a061a6c84ebf70a59ad9393f80c0f87bbc8439f7", "content_id": "316b18725ec3cd32b6fae4ba392c9cf5c88a9d4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4142, "license_type": "no_license", "max_line_length": 93, "num_lines": 99, "path": "/assignment4a.py", "repo_name": "Walesby/MachineLearning-Lab4", "src_encoding": "UTF-8", "text": "\"\"\"\r\nAssignment 4A - Machine Learning\r\nBy: David Walesby - 000732130\r\nPurpose: To implement a perceptron machine learning algorithm to classify\r\nthe four provided files\r\n\r\nLinearly separable: 1 and 4\r\nNot lineraly separable: 2 and 3\r\n\"\"\"\r\nimport csv\r\nimport numpy as np\r\nimport random\r\nfrom sklearn.preprocessing import normalize\r\n\r\n## This class represents a perceptron object\r\nclass Perceptron(object):\r\n def __init__(self, totalColumns, threshold, learningRate, epoch):\r\n self.epoch = epoch\r\n self.threshold = threshold\r\n self.learningRate = learningRate\r\n self.weights = np.zeros(totalColumns + 1)\r\n\r\n def predict(self, data):\r\n sum = np.dot(data, self.weights[1:]) + self.weights[0]\r\n if sum > self.threshold:\r\n prediction = 1\r\n else:\r\n prediction = 0\r\n return prediction\r\n\r\n def training(self, trainingData, trainingLabels):\r\n for _ in range(self.epoch):\r\n for data, label in zip(trainingData, trainingLabels):\r\n prediction = self.predict(data)\r\n self.threshold += self.learningRate * (label - prediction)\r\n self.weights[1:] += self.learningRate * (label - prediction) * data\r\n self.weights[0] += self.learningRate * (label - prediction)\r\n\r\n## Reads in the datafile and returns the arrays\r\ndef ReadFile(fileName):\r\n trainingData = []\r\n trainingLabels = []\r\n testingData = []\r\n testingLabels = []\r\n\r\n with open(fileName) as file:\r\n csv_reader = csv.reader(file , delimiter=\",\")\r\n line_count = 0\r\n for row in csv_reader:\r\n randomNumber = random.randint(1 , 101)\r\n if randomNumber > 25:\r\n testingData.append(row)\r\n else:\r\n trainingData.append(row)\r\n line_count += 1\r\n print(f'Processed {line_count} lines.')\r\n\r\n trainingData = np.array(trainingData, dtype=np.float32)\r\n testingData = np.array(testingData, dtype=np.float32)\r\n trainingLabels = trainingData[:,-1]\r\n testingLabels = testingData[:,-1]\r\n trainingData = np.delete(trainingData,-1, axis=1)\r\n testingData = np.delete(testingData,-1, axis=1)\r\n return trainingData, trainingLabels, testingData, testingLabels\r\n\r\n## Normalizes the data it recieves and then runs it through the perceptron for classification\r\ndef RunTests(trainingData, trainingLabels, testingData, testingLabels, fileName):\r\n normalizedTrainingData = normalize(trainingData, axis=0, norm='max')\r\n normalizedTestingData = normalize(testingData, axis=0, norm='max')\r\n numberOfInputs = np.size(normalizedTrainingData,1)\r\n perceptron = Perceptron(numberOfInputs, 0, 0.01, 10)\r\n perceptron.training(normalizedTrainingData, trainingLabels)\r\n correct = 0\r\n counter = 0\r\n for data in normalizedTestingData:\r\n result = perceptron.predict(data)\r\n if result == testingLabels[counter]:\r\n correct += 1\r\n counter += 1\r\n accuracy = correct / normalizedTestingData.shape[0]\r\n print()\r\n print(f'{fileName}')\r\n print(\"------------------------------------------------------------------------\")\r\n print(f'Accuracy: {round(accuracy * 100,1)}%')\r\n 
print(f'Weights: {perceptron.weights[1:]}')\r\n print(f'Threshold: {round(perceptron.threshold, 3)}')\r\n print()\r\n\r\n## Populating arrays with data from files\r\ntrainingData1, trainingLabels1, testingData1, testingLabels1 = ReadFile(\"000732130_1.csv\")\r\ntrainingData2, trainingLabels2, testingData2, testingLabels2 = ReadFile(\"000732130_2.csv\")\r\ntrainingData3, trainingLabels3, testingData3, testingLabels3 = ReadFile(\"000732130_3.csv\")\r\ntrainingData4, trainingLabels4, testingData4, testingLabels4 = ReadFile(\"000732130_4.csv\")\r\n\r\n## Run perceptron tests\r\nRunTests(trainingData1, trainingLabels1, testingData1, testingLabels1,\"000732130_1.csv\" )\r\nRunTests(trainingData2, trainingLabels2, testingData2, testingLabels2,\"000732130_2.csv\" )\r\nRunTests(trainingData3, trainingLabels3, testingData3, testingLabels3,\"000732130_3.csv\" )\r\nRunTests(trainingData4, trainingLabels4, testingData4, testingLabels4,\"000732130_4.csv\" )\r\n" }, { "alpha_fraction": 0.6873835921287537, "alphanum_fraction": 0.7325419187545776, "avg_line_length": 46.2696647644043, "blob_id": "88bfc4afa5e80da3d960ec3cf015e95caa60196a", "content_id": "4d14277bf6b89c79f316209ae4cdc4e9b979a9db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4296, "license_type": "no_license", "max_line_length": 135, "num_lines": 89, "path": "/assignment4b.py", "repo_name": "Walesby/MachineLearning-Lab4", "src_encoding": "UTF-8", "text": "\"\"\"\r\nAssignment 4B - Machine Learning\r\nBy: David Walesby - 000732130\r\nPurpose: To implement a multilayer perceptron classifier\r\n\"\"\"\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport csv\r\nimport numpy as np\r\nimport random\r\nfrom sklearn import tree\r\nfrom sklearn.preprocessing import normalize\r\n\r\n## Reads in the datafile and returns the arrays\r\ndef ReadFile(fileName):\r\n trainingData = []\r\n trainingLabels = []\r\n testingData = []\r\n testingLabels = []\r\n\r\n with open(fileName) as file:\r\n csv_reader = csv.reader(file , delimiter=\",\")\r\n line_count = 0\r\n for row in csv_reader:\r\n randomNumber = random.randint(1 , 101)\r\n if randomNumber > 25:\r\n testingData.append(row)\r\n else:\r\n trainingData.append(row)\r\n line_count += 1\r\n print(f'Processed {line_count} lines.')\r\n\r\n trainingData = np.array(trainingData, dtype=np.float32)\r\n testingData = np.array(testingData, dtype=np.float32)\r\n trainingLabels = trainingData[:,-1]\r\n testingLabels = testingData[:,-1]\r\n trainingData = np.delete(trainingData,-1, axis=1)\r\n testingData = np.delete(testingData,-1, axis=1)\r\n return trainingData, trainingLabels, testingData, testingLabels\r\n\r\n## Runs the data for classification through a decision tree classifier and a Multi Layer Perceptron classifier and displays the results\r\ndef RunTests(normalizedTrainingData, trainingLabels, normalizedTestingData, testingLabels, fileName):\r\n clf = tree.DecisionTreeClassifier()\r\n clf = clf.fit(normalizedTrainingData, trainingLabels)\r\n decisionPrediction = clf.predict(normalizedTestingData)\r\n decisionCorrect = (decisionPrediction == testingLabels).sum()\r\n decisionTreeAccuracy = decisionCorrect/len(decisionPrediction)*100\r\n\r\n mlpPerceptron = MLPClassifier(hidden_layer_sizes= 15,max_iter=250, learning_rate_init=0.17)\r\n mlpPerceptron.fit(normalizedTrainingData,trainingLabels)\r\n mlpPrediction = mlpPerceptron.predict(normalizedTestingData)\r\n mlpCorrect = (mlpPrediction == testingLabels).sum()\r\n mlpAccuracy = 
mlpCorrect/len(mlpPrediction)*100\r\n print()\r\n print(f'{fileName}')\r\n print(\"------------------------------------------------------------------------\")\r\n print(f'Accuracy Tree: {round(decisionTreeAccuracy,1)}%')\r\n print(f'Accuracy MLP: {round(mlpAccuracy,1)}%')\r\n print(f'{mlpPerceptron.get_params()}')\r\n print()\r\n\r\nclf = tree.DecisionTreeClassifier()\r\n\r\n## Store file information\r\ntrainingData1, trainingLabels1, testingData1, testingLabels1 = ReadFile(\"000732130_1.csv\")\r\ntrainingData2, trainingLabels2, testingData2, testingLabels2 = ReadFile(\"000732130_2.csv\")\r\ntrainingData3, trainingLabels3, testingData3, testingLabels3 = ReadFile(\"000732130_3.csv\")\r\ntrainingData4, trainingLabels4, testingData4, testingLabels4 = ReadFile(\"000732130_4.csv\")\r\ntrainingData5, trainingLabels5, testingData5, testingLabels5 = ReadFile(\"dexter.csv\")\r\n\r\n## Normalize the training data\r\nnormalizedTrainingData1 = normalize(trainingData1, axis=0, norm='max')\r\nnormalizedTrainingData2 = normalize(trainingData2, axis=0, norm='max')\r\nnormalizedTrainingData3 = normalize(trainingData3, axis=0, norm='max')\r\nnormalizedTrainingData4 = normalize(trainingData4, axis=0, norm='max')\r\nnormalizedTrainingData5 = normalize(trainingData5, axis=0, norm='max')\r\n\r\n## Normalize the testing data\r\nnormalizedTestingData1 = normalize(testingData1, axis=0, norm='max')\r\nnormalizedTestingData2 = normalize(testingData2, axis=0, norm='max')\r\nnormalizedTestingData3 = normalize(testingData3, axis=0, norm='max')\r\nnormalizedTestingData4 = normalize(testingData4, axis=0, norm='max')\r\nnormalizedTestingData5 = normalize(testingData5, axis=0, norm='max')\r\n\r\n## Run tests\r\nRunTests(normalizedTrainingData1, trainingLabels1, normalizedTestingData1, testingLabels1,\"000732130_1.csv\")\r\nRunTests(normalizedTrainingData2, trainingLabels2, normalizedTestingData2, testingLabels2,\"000732130_2.csv\")\r\nRunTests(normalizedTrainingData3, trainingLabels3, normalizedTestingData3, testingLabels3,\"000732130_3.csv\")\r\nRunTests(normalizedTrainingData4, trainingLabels4, normalizedTestingData4, testingLabels4,\"000732130_4.csv\")\r\nRunTests(normalizedTrainingData5, trainingLabels5, normalizedTestingData5, testingLabels5,\"dexter.csv\")\r\n" } ]
2
miladhzz/custom_auth_daneshjooyar
https://github.com/miladhzz/custom_auth_daneshjooyar
76e339d25a192ce83ba990876af4ab2d6e479f01
97296049c337f095343401dbbb371dadacfe77dd
bd217d75c3b99c3dacde25c99500bc766465ae91
refs/heads/master
2022-11-22T19:30:06.808572
2020-07-24T11:02:06
2020-07-24T11:02:06
280,474,706
5
0
null
null
null
null
null
[ { "alpha_fraction": 0.5158730149269104, "alphanum_fraction": 0.5345471501350403, "avg_line_length": 27.171052932739258, "blob_id": "afa2b4b93da2bbc37ae450b52b5192a8f36474cd", "content_id": "bb13ad3a0a24436577d8431e3439411482d31b5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2142, "license_type": "no_license", "max_line_length": 73, "num_lines": 76, "path": "/custom_login/helper.py", "repo_name": "miladhzz/custom_auth_daneshjooyar", "src_encoding": "UTF-8", "text": "from kavenegar import *\nfrom custom_auth_daneshjooyar.settings import Kavenegar_API\nfrom random import randint\nfrom zeep import Client\nfrom . import models\nimport datetime\nimport time\nfrom background_task import background\n\n\ndef send_otp(mobile, otp):\n mobile = [mobile, ]\n try:\n api = KavenegarAPI(Kavenegar_API)\n params = {\n 'sender': '1000596446', # optional\n 'receptor': mobile, # multiple mobile number, split by comma\n 'message': 'Your OTP is {}'.format(otp),\n }\n response = api.sms_send(params)\n print('OTP: ', otp)\n print(response)\n except APIException as e:\n print(e)\n except HTTPException as e:\n print(e)\n\n\n@background(schedule=10)\ndef send_otp_soap(mobile, otp):\n\n time.sleep(10)\n client = Client('http://api.kavenegar.com/soap/v1.asmx?WSDL')\n receptor = [mobile, ]\n\n empty_array_placeholder = client.get_type('ns0:ArrayOfString')\n receptors = empty_array_placeholder()\n for item in receptor:\n receptors['string'].append(item)\n\n api_key = Kavenegar_API\n message = 'Your OTP is {}'.format(otp)\n sender = '1000596446'\n status = 0\n status_message = ''\n\n result = client.service.SendSimpleByApikey(api_key,\n sender,\n message,\n receptors,\n 0,\n 1,\n status,\n status_message)\n print(result)\n print('OTP: ', otp)\n\n\ndef get_random_otp():\n return randint(1000, 9999)\n\n\ndef check_otp_expiration(mobile):\n try:\n user = models.MyUser.objects.get(mobile=mobile)\n now = datetime.datetime.now()\n otp_time = user.otp_create_time\n diff_time = now - otp_time\n print('OTP TIME: ', diff_time)\n\n if diff_time.seconds > 120:\n return False\n return True\n\n except models.MyUser.DoesNotExist:\n return False\n\n" }, { "alpha_fraction": 0.6583850979804993, "alphanum_fraction": 0.6583850979804993, "avg_line_length": 19.125, "blob_id": "3367bd549afdb73535c5c8e4ca4590bdbceafd86", "content_id": "98b3bfadc19b5675afb85a0a02fc81d85d846403", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 36, "num_lines": 8, "path": "/custom_login/forms.py", "repo_name": "miladhzz/custom_auth_daneshjooyar", "src_encoding": "UTF-8", "text": "from django import forms\nfrom . 
import models\n\n\nclass RegisterForm(forms.ModelForm):\n class Meta:\n model = models.MyUser\n fields = ['mobile', ]\n" }, { "alpha_fraction": 0.6530612111091614, "alphanum_fraction": 0.6530612111091614, "avg_line_length": 23.33333396911621, "blob_id": "44feae1d52a2d5a3e551d7d5b68d47de85a73bc0", "content_id": "8c6f8e3c547302f023a54eec3353a4a942b5b6b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 294, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/custom_login/signals.py", "repo_name": "miladhzz/custom_auth_daneshjooyar", "src_encoding": "UTF-8", "text": "\n\ndef my_callback(sender, **kwargs):\n print(\"Request finished\")\n\n\ndef create_new_user(sender, instance, created, **kwargs):\n if created:\n print(\"new user\", instance)\n\n\ndef update_user(sender, instance, created, **kwargs):\n if not created:\n print(\"update user\", instance)\n" }, { "alpha_fraction": 0.5722851157188416, "alphanum_fraction": 0.5722851157188416, "avg_line_length": 33.1136360168457, "blob_id": "c4e97f3aaea38b45d629d860fb285d21241ffff0", "content_id": "90b38b370420451e846bb0316e2eece40009a3f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3002, "license_type": "no_license", "max_line_length": 76, "num_lines": 88, "path": "/custom_login/views.py", "repo_name": "miladhzz/custom_auth_daneshjooyar", "src_encoding": "UTF-8", "text": "from django.contrib.auth import login\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.shortcuts import render\nfrom .models import MyUser\nfrom . import forms\nfrom . import helper\nfrom django.contrib import messages\n\n\ndef register_view(request):\n form = forms.RegisterForm\n\n if request.method == \"POST\":\n try:\n if \"mobile\" in request.POST:\n mobile = request.POST.get('mobile')\n user = MyUser.objects.get(mobile=mobile)\n # send otp\n otp = helper.get_random_otp()\n # helper.send_otp(mobile, otp)\n helper.send_otp_soap(mobile, otp)\n # save otp\n print(otp)\n user.otp = otp\n user.save()\n request.session['user_mobile'] = user.mobile\n return HttpResponseRedirect(reverse('verify'))\n\n except MyUser.DoesNotExist:\n form = forms.RegisterForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n # send otp\n otp = helper.get_random_otp()\n # helper.send_otp(mobile, otp)\n helper.send_otp_soap(mobile, otp)\n # save otp\n print(otp)\n user.otp = otp\n user.is_active = False\n user.save()\n request.session['user_mobile'] = user.mobile\n return HttpResponseRedirect(reverse('verify'))\n return render(request, 'register.html', {'form': form})\n\n\ndef verify(request):\n try:\n mobile = request.session.get('user_mobile')\n user = MyUser.objects.get(mobile = mobile)\n\n if request.method == \"POST\":\n\n # check otp expiration\n if not helper.check_otp_expiration(user.mobile):\n messages.error(request, \"OTP is expired, please try again.\")\n return HttpResponseRedirect(reverse('register_view'))\n\n if user.otp != int(request.POST.get('otp')):\n messages.error(request, \"OTP is incorrect.\")\n return HttpResponseRedirect(reverse('verify'))\n\n user.is_active = True\n user.save()\n login(request, user)\n return HttpResponseRedirect(reverse('dashboard'))\n\n return render(request, 'verify.html', {'mobile': mobile})\n\n except MyUser.DoesNotExist:\n messages.error(request, \"Error accorded, try again.\")\n return HttpResponseRedirect(reverse('register_view'))\n\n\n# def 
mobile_login(request):\n# if request.method == \"POST\":\n# if \"mobile\" in request.POST:\n# mobile = request.POST.get('mobile')\n# user = MyUser.objects.get(mobile=mobile)\n# login(request, user)\n# return HttpResponseRedirect(reverse('dashboard'))\n#\n# return render(request, 'mobile_login.html')\n\n\ndef dashboard(request):\n return render(request, 'dashboard.html')\n" }, { "alpha_fraction": 0.7282378077507019, "alphanum_fraction": 0.7282378077507019, "avg_line_length": 32.64285659790039, "blob_id": "faabd14453687e7f6cd40cded0bb235e0b8ad70a", "content_id": "39e357deb2d73759cb970b55465a75cd15b790a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 72, "num_lines": 14, "path": "/custom_login/apps.py", "repo_name": "miladhzz/custom_auth_daneshjooyar", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\nfrom django.core.signals import request_finished\nfrom django.db.models.signals import post_save\n\n\nclass CustomLoginConfig(AppConfig):\n name = 'custom_login'\n\n def ready(self):\n from . import signals\n from . import models\n request_finished.connect(signals.my_callback)\n post_save.connect(signals.create_new_user, sender=models.MyUser)\n post_save.connect(signals.update_user, sender=models.MyUser)\n" } ]
5
matthijsvk/convNets
https://github.com/matthijsvk/convNets
d01393818bcd3a5f33a7b2e488dc6a20823beca8
7e65db7857a4e6abfbcab264953eb7741319de6c
56bdfdd68447dccb0ef6aff0fd98f71e228ae0ae
refs/heads/master
2023-01-06T13:37:17.549948
2017-03-19T20:32:15
2017-03-19T20:32:15
69,613,522
0
1
Apache-2.0
2016-09-29T22:39:56
2017-04-08T07:46:31
2017-03-19T20:33:00
Jupyter Notebook
[ { "alpha_fraction": 0.5287169218063354, "alphanum_fraction": 0.5366598963737488, "avg_line_length": 31.296052932739258, "blob_id": "78ba4baac65306b1bb0b2c299f56c00eed010ebd", "content_id": "9a051e6b4feab9d3ad1d17005c6e4cd1cdbcb0d8", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 4910, "license_type": "permissive", "max_line_length": 101, "num_lines": 152, "path": "/code/audioSR/HTK/htk/Makefile", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------- \n# \n# ___ \n# |_| | |_/ SPEECH \n# | | | | \\ RECOGNITION \n# ========= SOFTWARE \n# \n# \n# ----------------------------------------------------------- \n# Copyright: Cambridge University\n# 1995-2006 Engineering Department\n# http://htk.eng.cam.ac.uk\n# http://mi.eng.cam.ac.uk\n# \n# Use of this software is governed by a License Agreement \n# ** See the file License for the Conditions of Use ** \n# ** This banner notice must not be removed ** \n# \n# ----------------------------------------------------------- \n# File: Makefile. Generated from Makefile.in by configure.\n# ----------------------------------------------------------- \n\nSHELL = /bin/sh\nsrcdir = .\ntop_srcdir = .\n\nprefix = /usr/local\nexec_prefix = ${prefix}\nbindir = ${exec_prefix}/bin\nsbindir = ${exec_prefix}/sbin\nlibexecdir = ${exec_prefix}/libexec\ndatadir = ${prefix}/share\nsysconfdir = ${prefix}/etc\nsharedstatedir = ${prefix}/com\nlocalstatedir = ${prefix}/var\nlibdir = ${exec_prefix}/lib\ninfodir = ${prefix}/share/info\nmandir = ${prefix}/share/man\nincludedir = ${prefix}/include\noldincludedir = /usr/include\n\nCC = gcc\nCPPFLAGS = \nCFLAGS = $(CPPFLAGS) -m32 -ansi -D_SVID_SOURCE -DOSS_AUDIO -D'ARCH=\"x86_64\"' -Wall -Wno-switch -g -O2\nLDFLAGS = -L/usr/X11R6/lib \nLIBS = -lm -lX11 \nINSTALL = /usr/bin/install -c\n\nDESTDIR =\n\npkgdatadir = $(datadir)/@PACKAGE@\npkglibdir = $(libdir)/@PACKAGE@\npkgincludedir = $(includedir)/@PACKAGE@\n\ntop_builddir = .\n\nACLOCAL = @ACLOCAL@\nAUTOCONF = @AUTOCONF@\nAUTOMAKE = @AUTOMAKE@\nAUTOHEADER = @AUTOHEADER@\n\nINSTALL = /usr/bin/install -c\nINSTALL_PROGRAM = ${INSTALL} $(AM_INSTALL_PROGRAM_FLAGS)\nINSTALL_DATA = ${INSTALL} -m 644\nINSTALL_SCRIPT = ${INSTALL}\ntransform = s,x,x,\nMAKEINFO = @MAKEINFO@\nPACKAGE = @PACKAGE@\nVERSION = @VERSION@\n\nHTKLIB = HTKLib\nHLMLIB = HLMLib\nHTKTOOLS = HTKTools\nHLMTOOLS = HLMTools\nLVREC = HTKLVRec\nHTKBOOK = HTKBook\nSUBDIRS = $(HTKLIB) $(HLMLIB) $(HTKTOOLS) $(HLMTOOLS) $(LVREC) $(BOOK) \nACLOCAL_M4 = $(top_srcdir)/aclocal.m4\nmkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs\nCONFIG_CLEAN_FILES = \nDIST_COMMON = README ChangeLog Makefile.am \\\nMakefile.in aclocal.m4 configure configure.ac install-sh missing \\\nmkinstalldirs\n\n\nDISTFILES = $(DIST_COMMON) $(SOURCES) $(HEADERS) $(TEXINFOS) $(EXTRA_DIST)\n\nTAR = gtar\nGZIP_ENV = --best\n\n.SUFFIXES:\n\n# build rules\nall: htktools hlmtools\n$(HTKLIB)/HTKLib.a:\n\t(cd $(HTKLIB) && $(MAKE) HTKLib.a) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nhtklib: $(HTKLIB)/HTKLib.a\n$(HTKLIB)/HTKLiblv.a:\n\t(cd $(HTKLIB) && $(MAKE) HTKLiblv.a) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nhtkliblv: $(HTKLIB)/HTKLiblv.a\n$(HLMLIB)/HLMLib.a:\n\t(cd $(HLMLIB) && $(MAKE) all) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nhlmlib: 
$(HLMLIB)/HLMLib.a\nhtktools: $(HTKLIB)/HTKLib.a\n\t(cd $(HTKTOOLS) && $(MAKE) all) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nhlmtools: $(HLMLIB)/HLMLib.a\n\t(cd $(HLMTOOLS) && $(MAKE) all) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nhdecode: $(HTKLIB)/HTKLiblv.a\n\t(cd $(LVREC) && $(MAKE) all) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\nbook: \n\t(cd $(HTKBOOK) && $(MAKE) all) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\n\n# installation\ninstall-htktools: htktools\n\t(cd $(HTKTOOLS) && $(MAKE) install) \\\n\t|| case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\ninstall-hlmtools: hlmtools\n\t(cd $(HLMTOOLS) && $(MAKE) install) \\\n\t|| case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\ninstall-hdecode:\n\t(cd $(LVREC) && $(MAKE) install) \\\n\t|| case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\ninstall-book: book\n\t(cd $(HTKBOOK) && $(MAKE) install) \\\n\t|| case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac;\n\nclean:\n\t/bin/rm -f *~\n\t@for dir in $(SUBDIRS); do \\\n\t (cd $$dir && $(MAKE) clean) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac; \\\n\tdone && test -z \"$$fail\"\ndistclean: clean\n\t/bin/rm -f Makefile config.h config.status config.cache config.log\n\t@for dir in $(SUBDIRS); do \\\n\t (cd $$dir && $(MAKE) distclean) \\\n\t || case \"$(MFLAGS)\" in *k*) fail=yes;; *) exit 1;; esac; \\\n\tdone && test -z \"$$fail\"\n\ninstall: install-htktools install-hlmtools\ndocs: book\n\n.PHONY: all doc install clean distclean htklib-decode \\\n\thtktools hlmtools hdecode docs book \\\n\tinstall-htktools install-hlmtools install-hdecode install-book\n\n" }, { "alpha_fraction": 0.5371367335319519, "alphanum_fraction": 0.5479009747505188, "avg_line_length": 37.70833206176758, "blob_id": "10458640428546847068b83395caa392fb9651b9", "content_id": "465a8d8f5f8bcfafd04d869aa1152f5981e9102d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "permissive", "max_line_length": 74, "num_lines": 24, "path": "/code/audioSR/Spoken-language-identification-master/choose_equal_split.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\"split data into training and validation sets\"\"\"\nimport csv\n\nwith open('trainingData.csv', 'rb') as csvfile:\n next(csvfile) #skip headers\n data = list(csv.reader(csvfile, delimiter=','))\n\n #Map every language to an ID\n langs = set([language.strip() for _,language in data])\n ID = {lang: i for i,lang in enumerate(sorted(langs))}\n\n #Write first 306 items to training set and the rest to validation set\n cnt = [0 for _ in range(len(langs))]\n with open('trainEqual.csv', 'w') as train:\n with open('valEqaul.csv', 'w') as val:\n for line in data:\n filepath, language = map(str.strip, line)\n id_lang = ID[language]\n\n if (cnt[id_lang] < 306):\n train.write(filepath[:-4] + ',' + str(id_lang) + '\\n')\n else:\n val.write(filepath[:-4] + ',' + str(id_lang) + '\\n')\n cnt[id_lang] += 1\n" }, { "alpha_fraction": 0.5530086159706116, "alphanum_fraction": 0.5558739304542542, "avg_line_length": 25.923076629638672, "blob_id": "ecdd5631116abe73435e4d61a4fb81876ce2d473", "content_id": "6f355f0a59e023c270cc5a258ce52c45ea5e74bb", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 349, "license_type": "permissive", "max_line_length": 51, "num_lines": 13, "path": 
"/code/audioSR/Preprocessing/helpFunctions.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import sys\nimport os\n\ndef writeToTxt(lines, path):\n if (not os.path.exists(os.path.dirname(path))):\n os.makedirs(os.path.dirname(path))\n file = open(path, 'w')\n for line in lines:\n if (lines.index(line) < len(lines) - 1):\n file.write(\"%s\\n\" % line)\n else:\n file.write(\"%s\" % line)\n file.close()" }, { "alpha_fraction": 0.5506406426429749, "alphanum_fraction": 0.5930445194244385, "avg_line_length": 35.831459045410156, "blob_id": "ac1c7e45e3e786f68473e1b112d669a2a9f22009", "content_id": "0bae23615d493c7124ea4d082646e144a26d9f47", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3278, "license_type": "permissive", "max_line_length": 125, "num_lines": 89, "path": "/code/audioSR/Spoken-language-identification-master/test_augm_network.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import sys\nimport caffe\nimport numpy as np\n\ncaffe.set_mode_gpu()\n\n# info about classes\nfile = open('trainingData.csv')\ndata = file.readlines()[1:]\nlangs = set()\nfor line in data:\n filepath, language = line.split(',')\n language = language.strip()\n langs.add(language)\nlangs = sorted(langs)\n\n\n# network parameters:\ndeploy_name = 'augm_32r-2-64r-2-64r-2-128r-2-128r-2-256r-2-1024rd0.3-1024rd0.3'\nnetwork_name = 'augm_dropout0.3_on_augm84K-lr0.01_30K'\niterations = '90000'\naveSamples = 20 # average over this many samples\n\nnet = caffe.Classifier(model_file='prototxt/deploy.' + deploy_name + '.prototxt',\n pretrained_file='models/' + network_name + '_iter_' + iterations + '.caffemodel')\n\nnet.blobs['data'].reshape(1, 1, 256, 768)\npredict_set = sys.argv[1]\n\nif (predict_set == \"test\"):\n folder = 'test/png/'\n f = open('testingData.csv')\n cnt = 12320\n print_file = open('predictions/test_' + network_name + '_iter_' + iterations + '_' + str(aveSamples) + '.csv', 'w')\nelif (predict_set == \"val\"):\n folder = '/home/brainstorm/caffe/Data/mnt/3/language/train/pngaugm/'\n f = open('valEqual.csv')\n cnt = 12320\n print_file = open('predictions/validation_' + network_name + '_iter_' + iterations + '_' + str(aveSamples) + '.csv', 'w')\nelse: # train\n folder = '/home/brainstorm/caffe/Data/mnt/3/language/train/pngaugm/'\n f = open('trainEqual.csv')\n cnt = 10000\n print_file = open('predictions/train_' + network_name + '_iter_' + iterations + '_' + str(aveSamples) + '.csv', 'w')\n \npreds = []\nlabels = []\ntopcoder_score = 0.0\nprocessed = 0\n\nfor iter in range(cnt):\n st = f.readline()\n if (predict_set == \"val\" or predict_set == \"train\"):\n (name, label) = st.split(',')\n label = int(label)\n else:\n name = st.strip()[:-4]\n processed += 1\n out = np.zeros((176, ))\n for randomIndex in range(aveSamples):\n image = caffe.io.load_image(folder + name + '.' + str(randomIndex) + '.png', color=False)\n image = np.transpose(image, (2, 0, 1))\n #image = np.concatenate([image, np.zeros((1, 256, 858 - 768), dtype=np.float32)], axis=2)\n net.blobs['data'].data[...] 
= image\n out += net.forward()['loss'][0]\n\n pred = sorted([(x, it) for it, x in enumerate(out)], reverse=True)\n \n if (predict_set == \"val\" or predict_set == \"train\"):\n if (pred[0][1] == label):\n topcoder_score = topcoder_score + 1000\n elif (pred[1][1] == label):\n topcoder_score = topcoder_score + 400\n elif (pred[2][1] == label): \n topcoder_score = topcoder_score + 160\n \n for i in range(3):\n lang_id = pred[i][1]\n lang = langs[lang_id]\n print_file.write(name + '.mp3,' + lang + ',' + str(i + 1) + '\\n')\n\n if (iter % 100 == 0):\n print >> sys.stderr, network_name + '_iter_' + iterations + '_' + str(aveSamples)\n print >> sys.stderr, \"processed %d / %d images (%d samples/mp3)\" % (iter, cnt, aveSamples)\n print >> sys.stderr, \"score: \", topcoder_score\n print >> sys.stderr, \"expected score:\", topcoder_score / processed * 35200\n\nprint >> sys.stderr, \"Final score: \", topcoder_score, \" / \", cnt, \"000\"\nprint >> sys.stderr, \"expected score:\", topcoder_score / processed * 35200\n" }, { "alpha_fraction": 0.4958658814430237, "alphanum_fraction": 0.5006890296936035, "avg_line_length": 30.54347801208496, "blob_id": "acefe016f910f22e0e75c97c1596c9fe56cdd92d", "content_id": "92fd7f7bbc4cb27a1ff539a434eabb3827bbe678", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4354, "license_type": "permissive", "max_line_length": 118, "num_lines": 138, "path": "/code/lipreading/koen/train_net.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import time\n\nfrom collections import OrderedDict\n\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\n\nimport lasagne\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom theano.scalar.basic import UnaryScalarOp, same_out_nocomplex\nfrom theano.tensor.elemwise import Elemwise\n\n\n# Given a dataset and a model, this function trains the model on the dataset for several epochs\n# (There is no default trainer function in Lasagne yet)\ndef train (train_fn, val_fn,\n model,\n batch_size,\n LR_start, LR_decay,\n num_epochs,\n X_train, y_train,\n X_val, y_val,\n X_test, y_test,\n save_path=None,\n shuffle_parts=1):\n # A function which shuffles a dataset\n def shuffle (X, y):\n \n # print(len(X))\n \n chunk_size = len(X) / shuffle_parts\n shuffled_range = range(chunk_size)\n \n X_buffer = np.copy(X[0:chunk_size])\n y_buffer = np.copy(y[0:chunk_size])\n \n for k in range(shuffle_parts):\n \n np.random.shuffle(shuffled_range)\n \n for i in range(chunk_size):\n X_buffer[i] = X[k * chunk_size + shuffled_range[i]]\n y_buffer[i] = y[k * chunk_size + shuffled_range[i]]\n \n X[k * chunk_size:(k + 1) * chunk_size] = X_buffer\n y[k * chunk_size:(k + 1) * chunk_size] = y_buffer\n \n return X, y\n \n # shuffled_range = range(len(X))\n # np.random.shuffle(shuffled_range)\n \n # new_X = np.copy(X)\n # new_y = np.copy(y)\n \n # for i in range(len(X)):\n \n # new_X[i] = X[shuffled_range[i]]\n # new_y[i] = y[shuffled_range[i]]\n \n # return new_X,new_y\n \n # This function trains the model a full epoch (on the whole dataset)\n def train_epoch (X, y, LR):\n \n loss = 0\n batches = len(X) / batch_size\n \n for i in range(batches):\n loss += train_fn(X[i * batch_size:(i + 1) * batch_size], y[i * batch_size:(i + 1) * batch_size], LR)\n \n loss /= batches\n \n return loss\n \n # This function tests the model a full epoch (on the whole dataset)\n def val_epoch (X, y):\n \n err = 0\n loss = 0\n batches = len(X) / batch_size\n \n for i 
in range(batches):\n new_loss, new_err = val_fn(X[i * batch_size:(i + 1) * batch_size], y[i * batch_size:(i + 1) * batch_size])\n err += new_err\n loss += new_loss\n \n err = err / batches * 100\n loss /= batches\n \n return err, loss\n \n # shuffle the train set\n X_train, y_train = shuffle(X_train, y_train)\n best_val_err = 100\n best_epoch = 1\n LR = LR_start\n \n # We iterate over epochs:\n for epoch in range(num_epochs):\n \n start_time = time.time()\n \n train_loss = train_epoch(X_train, y_train, LR)\n X_train, y_train = shuffle(X_train, y_train)\n \n val_err, val_loss = val_epoch(X_val, y_val)\n \n # test if validation error went down\n if val_err <= best_val_err:\n \n best_val_err = val_err\n best_epoch = epoch + 1\n \n test_err, test_loss = val_epoch(X_test, y_test)\n \n if save_path is not None:\n np.savez(save_path, *lasagne.layers.get_all_param_values(model))\n \n epoch_duration = time.time() - start_time\n \n # Then we print the results for this epoch:\n print(\"Epoch \" + str(epoch + 1) + \" of \" + str(num_epochs) + \" took \" + str(epoch_duration) + \"s\")\n print(\" LR: \" + str(LR))\n print(\" training loss: \" + str(train_loss))\n print(\" validation loss: \" + str(val_loss))\n print(\" validation error rate: \" + str(val_err) + \"%\")\n print(\" best epoch: \" + str(best_epoch))\n print(\" best validation error rate: \" + str(best_val_err) + \"%\")\n print(\" test loss: \" + str(test_loss))\n print(\" test error rate: \" + str(test_err) + \"%\")\n \n # decay the LR\n LR *= LR_decay\n\n" }, { "alpha_fraction": 0.6481732130050659, "alphanum_fraction": 0.6771312355995178, "avg_line_length": 31.66371726989746, "blob_id": "b71ac174c032e760b23163a55cb837e9d7a5b244", "content_id": "abaf7e1c755941fef24c0e190b0cd344941e9c42", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3695, "license_type": "permissive", "max_line_length": 120, "num_lines": 113, "path": "/code/Experiments/Lasagne_examples/examples/ResNets/resnet152/resnet152_test.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import urllib\nimport io\nimport skimage.transform\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 8, 6\nimport argparse\nimport time\nimport pickle\nimport numpy as np\nimport theano\nimport lasagne\n\nparser = argparse.ArgumentParser(description=\"Getting top 5 classes of images\")\n\nadd_arg = parser.add_argument\n\nadd_arg(\"-i\", \"--input_image\", help=\"Input image\")\nadd_arg(\"-m\", \"--model_file\", help=\"Model pickle file\")\n\nargs = parser.parse_args()\n\nimport resnet152\n\ndef prep_image(fname, mean_values):\n t0 = time.time()\n ext = fname.split('.')[-1]\n im = plt.imread(fname, ext)\n h, w, _ = im.shape\n if h < w:\n im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)\n else:\n im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)\n h, w, _ = im.shape\n im = im[h//2-112:h//2+112, w//2-112:w//2+112]\n # h, w, _ = im.shape\n # im = skimage.transform.resize(im, (224, 224), preserve_range=True)\n h, w, _ = im.shape\n rawim = np.copy(im).astype('uint8')\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n im = im[::-1, :, :]\n im = im - mean_values\n t1 = time.time()\n print \"Time taken in preparing the image : {}\".format(t1 - t0)\n return rawim, im[np.newaxis].astype('float32')\n\ndef get_net_fun(pkl_model):\n\tnet, mean_img, synset_words = 
resnet152.load_model(pkl_model)\n\n\tget_class_prob = theano.function([net['input'].input_var], lasagne.layers.get_output(net['prob'],deterministic=True))\n\n\tdef print_top5(im_path):\n\t\traw_im, im = prep_image(im_path, mean_img)\n\t\tprob = get_class_prob(im)[0]\n\t\tres = sorted(zip(synset_words, prob), key=lambda t: t[1], reverse=True)[:5]\n\t\tfor c, p in res:\n\t\t\tprint ' ', c, p\n\n\treturn get_class_prob, print_top5\n\ndef get_feature_extractor(pkl_model, layer_name):\n\tnet, mean_img, synset_words = resnet152.load_model(pkl_model)\n\tlayer_output = theano.function([net['input'].input_var], lasagne.layers.get_output(net[layer_name],deterministic=True))\n\n\tdef feature_extractor(im_path):\n\t\traw_im, im = prep_image(im_path, mean_img)\n\t\treturn layer_output(im)[0]\n\n\treturn feature_extractor\n\nif __name__ == \"__main__\":\n\tprint \"Compiling functions...\"\n\tget_prob, print_top5 = get_net_fun(args.model_file) # expects pkl model\n\tt0 = time.time()\n\tprint_top5(args.input_image)\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\tprint \"Compiling function for getting conv1 ....\"\n\tfeature_extractor = get_feature_extractor(args.model_file, 'conv1')\n\tt0 = time.time()\n\tprint feature_extractor(args.input_image).shape\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\tprint \"Compiling function for getting res2c ....\"\n\tfeature_extractor = get_feature_extractor(args.model_file, 'res2c')\n\tt0 = time.time()\n\tprint feature_extractor(args.input_image).shape\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\tprint \"Compiling function for getting res3d ....\"\n\tfeature_extractor = get_feature_extractor(args.model_file, 'res3d')\n\tt0 = time.time()\n\tprint feature_extractor(args.input_image).shape\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\tprint \"Compiling function for getting conv res4f ....\"\n\tfeature_extractor = get_feature_extractor(args.model_file, 'res4f')\n\tt0 = time.time()\n\tprint feature_extractor(args.input_image).shape\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\tprint \"Compiling function for getting conv res5c ....\"\n\tfeature_extractor = get_feature_extractor(args.model_file, 'res5c')\n\tt0 = time.time()\n\tprint feature_extractor(args.input_image).shape\n\tt1 = time.time()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\n\n\n" }, { "alpha_fraction": 0.5116029977798462, "alphanum_fraction": 0.5329049229621887, "avg_line_length": 31.714859008789062, "blob_id": "b9838469c54a726fccf1f10177cec08f1556d295", "content_id": "59e2c566e761c974a2c8cd4aa4639fd12877ee4a", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8403, "license_type": "permissive", "max_line_length": 100, "num_lines": 249, "path": "/code/Experiments/BinaryNet-master/Run-time/binary_ops.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\r\nimport time\r\n\r\nimport numpy as np\r\nimport theano\r\nimport theano.tensor as T\r\n\r\nimport theano.misc.pycuda_init\r\nfrom pycuda.compiler import SourceModule\r\nimport pycuda.driver as drv\r\n\r\nimport theano.sandbox.cuda as cuda\r\nfrom theano.sandbox.cuda.basic_ops import host_from_gpu\r\n\r\nimport lasagne\r\n\r\n# Homemade (and very unoptimized) \r\n# Theano GPU matrix multiplication operation\r\n# Our 'baseline' kernel\r\nclass Gemm(cuda.GpuOp):\r\n \r\n def 
__eq__(self, other):\r\n return type(self) == type(other)\r\n\r\n def __hash__(self):\r\n return hash(type(self))\r\n\r\n def __str__(self):\r\n return self.__class__.__name__\r\n \r\n def make_node(self, inp1, inp2):\r\n inp1 = cuda.basic_ops.gpu_contiguous(cuda.basic_ops.as_cuda_ndarray_variable(inp1))\r\n inp2 = cuda.basic_ops.gpu_contiguous(cuda.basic_ops.as_cuda_ndarray_variable(inp2))\r\n\r\n assert inp1.dtype == \"float32\"\r\n assert inp2.dtype == \"float32\"\r\n assert inp1.ndim == 2\r\n assert inp2.ndim == 2\r\n\r\n return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])\r\n \r\n def output_type(self, inp):\r\n return cuda.CudaNdarrayType(broadcastable=[False, False])\r\n\r\n def make_thunk(self, node, storage_map, _, _2):\r\n \r\n mod = SourceModule(open(\"binary_kernels.cu\").read())\r\n gemm_kernel = mod.get_function(\"gemm\")\r\n \r\n inputs = [storage_map[v] for v in node.inputs]\r\n outputs = [storage_map[v] for v in node.outputs]\r\n\r\n def thunk():\r\n \r\n # inputs\r\n A = inputs[0][0]\r\n B = inputs[1][0]\r\n \r\n # dimensions\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n k = B.shape[1]\r\n assert n == B.shape[0] # Otherwise GEMM is impossible\r\n assert n%16 == 0 # Block size\r\n \r\n # output\r\n output_shape = (m, k)\r\n C = outputs[0]\r\n # only allocate if there is no previous allocation of the right size.\r\n if C[0] is None or C[0].shape != output_shape:\r\n C[0] = cuda.CudaNdarray.zeros(output_shape)\r\n \r\n # Launching GEMM GPU kernel \r\n block_size = 16\r\n block = (block_size,block_size,1)\r\n grid = (k / block_size+1, m / block_size+1) # better too many blocks than too little\r\n gemm_kernel(A,B,C[0], np.intc(m), np.intc(n), np.intc(k), block= block, grid=grid)\r\n \r\n thunk.inputs = inputs\r\n thunk.outputs = outputs\r\n thunk.lazy = False\r\n\r\n return thunk\r\n \r\ngemm = Gemm()\r\n\r\n# Our 'XNOR' kernel\r\nclass XnorGemm(cuda.GpuOp):\r\n \r\n def __eq__(self, other):\r\n return type(self) == type(other)\r\n\r\n def __hash__(self):\r\n return hash(type(self))\r\n\r\n def __str__(self):\r\n return self.__class__.__name__\r\n \r\n def make_node(self, inp1, inp2):\r\n inp1 = cuda.basic_ops.gpu_contiguous(cuda.basic_ops.as_cuda_ndarray_variable(inp1))\r\n inp2 = cuda.basic_ops.gpu_contiguous(cuda.basic_ops.as_cuda_ndarray_variable(inp2))\r\n\r\n assert inp1.dtype == \"float32\"\r\n assert inp2.dtype == \"float32\"\r\n assert inp1.ndim == 2\r\n assert inp2.ndim == 2\r\n\r\n return theano.Apply(self, [inp1, inp2], [self.output_type(inp1)()])\r\n \r\n def output_type(self, inp):\r\n return cuda.CudaNdarrayType(broadcastable=[False, False])\r\n\r\n def make_thunk(self, node, storage_map, _, _2):\r\n \r\n mod = SourceModule(open(\"binary_kernels.cu\").read())\r\n concatenate_rows_kernel = mod.get_function(\"concatenate_rows_kernel\")\r\n concatenate_cols_kernel = mod.get_function(\"concatenate_cols_kernel\")\r\n xnor_kernel = mod.get_function(\"xnor_gemm\")\r\n \r\n inputs = [storage_map[v] for v in node.inputs]\r\n outputs = [storage_map[v] for v in node.outputs]\r\n \r\n # THIS IS PROBABLY THE PART YOU ARE INTERESTED IN\r\n def thunk():\r\n \r\n # inputs\r\n A = inputs[0][0]\r\n B = inputs[1][0]\r\n \r\n # dimensions\r\n m = A.shape[0]\r\n n = A.shape[1]\r\n k = B.shape[1]\r\n assert n == B.shape[0] # Otherwise GEMM is impossible\r\n assert n%(32*16) == 0 # Concatenation and block size\r\n \r\n # output\r\n output_shape = (m, k)\r\n C = outputs[0]\r\n # only allocate if there is no previous allocation of the right size.\r\n if C[0] is None or 
C[0].shape != output_shape:\r\n C[0] = cuda.CudaNdarray.zeros(output_shape) \r\n \r\n # Concatenating the rows of A \r\n Ac = drv.mem_alloc(m*n*4/32)\r\n block_size = 64 \r\n block = (block_size,1,1)\r\n grid = (m*n/(block_size*32)+1,1)\r\n concatenate_rows_kernel(A,Ac, np.intc(m*n/32), block= block, grid=grid)\r\n \r\n # Concatenating the columns of B\r\n Bc = drv.mem_alloc(n*k*4/32) \r\n block_size = 64 \r\n block = (block_size,1,1)\r\n grid = (k/block_size+1,1)\r\n concatenate_cols_kernel(B,Bc, np.intc(n), np.intc(k), block= block, grid=grid)\r\n \r\n # Launching xnor_kernel\r\n block_size = 16\r\n block = (block_size,block_size,1)\r\n grid = (k / block_size + 1, m / block_size + 1) # better too many blocks than too little\r\n xnor_kernel(Ac,Bc,C[0], np.intc(m), np.intc(n/32.), np.intc(k), block= block, grid=grid)\r\n \r\n thunk.inputs = inputs\r\n thunk.outputs = outputs\r\n thunk.lazy = False\r\n\r\n return thunk\r\n \r\nxnor_gemm = XnorGemm()\r\n \r\ndef SignNumpy(x):\r\n return np.float32(2.*np.greater_equal(x,0)-1.)\r\n\r\ndef SignTheano(x):\r\n return T.cast(2.*T.ge(x,0)-1., theano.config.floatX)\r\n\r\n# A custom Lasagne dense layer using our GPU kernels.\r\nclass DenseLayer(lasagne.layers.DenseLayer):\r\n\r\n def __init__(self, incoming, num_units, kernel=\"theano\", **kwargs):\r\n \r\n self.kernel = kernel\r\n super(DenseLayer, self).__init__(incoming, num_units, **kwargs)\r\n \r\n def get_output_for(self, input, **kwargs):\r\n if input.ndim > 2:\r\n # if the input has more than two dimensions, flatten it into a\r\n # batch of feature vectors.\r\n input = input.flatten(2)\r\n \r\n if self.kernel == \"baseline\":\r\n activation = gemm(input, self.W)\r\n \r\n if self.kernel == \"theano\":\r\n activation = T.dot(input, self.W)\r\n \r\n if self.kernel == \"xnor\":\r\n activation = xnor_gemm(input, self.W)\r\n \r\n if self.b is not None:\r\n activation = activation + self.b.dimshuffle('x', 0)\r\n return self.nonlinearity(activation)\r\n \r\n# Test suite\r\nif __name__ == \"__main__\": \r\n # N = 8192\r\n N = 4096\r\n m = N\r\n n = N\r\n k = N\r\n # m = 784\r\n # n = 512 \r\n # k = 10\r\n \r\n A = T.fmatrix()\r\n B = T.fmatrix()\r\n dot1 = theano.function([A,B], T.dot(A, B))\r\n dot2 = theano.function([A,B], host_from_gpu(gemm(A, B)))\r\n dot3 = theano.function([A,B], host_from_gpu(xnor_gemm(A,B)))\r\n \r\n # Generating random BINARY matrices\r\n a = SignNumpy(np.random.randn(m, n))\r\n b = SignNumpy(np.random.randn(n, k))\r\n # a = np.float32(np.random.randn(m, n))\r\n # b = np.float32(np.random.randn(n, k))\r\n\r\n start_time = time.time()\r\n c1 = dot1(a,b)\r\n dot1_duration = time.time() - start_time\r\n # print c1[0][0]\r\n print(\"Theano time = \"+str(dot1_duration)+\"s\")\r\n \r\n start_time = time.time()\r\n c3 = dot3(a,b)\r\n dot3_duration = time.time() - start_time\r\n # print c3[0][0]\r\n print(\"XNOR kernel time = \"+str(dot3_duration)+\"s\")\r\n \r\n start_time = time.time()\r\n c2 = dot2(a,b)\r\n dot2_duration = time.time() - start_time\r\n # print c2[0][0]\r\n print(\"Baseline kernel time = \"+str(dot2_duration)+\"s\")\r\n \r\n # Asserting the kernels are giving the same output\r\n print \"np.mean(np.absolute(c1-c3)) = \" + str(np.mean(np.absolute(c1-c3)))\r\n print \"np.mean(np.absolute(c2-c3)) = \" + str(np.mean(np.absolute(c2-c3)))\r\n print \"np.allclose(c1, c3) = \" + str(np.allclose(c1, c3))\r\n print \"np.allclose(c2, c3) = \" + str(np.allclose(c2, c3))\r\n\r\n " }, { "alpha_fraction": 0.5348234176635742, "alphanum_fraction": 0.5551844239234924, 
"avg_line_length": 29.756284713745117, "blob_id": "bc74c3dc6c1800d23c2206b7cf4cecc0eb789bbd", "content_id": "67f742d435933f7293f6fa2eae568d9be9dfc746", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28142, "license_type": "permissive", "max_line_length": 138, "num_lines": 915, "path": "/code/lipreading/buildNetworks.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# ResNet-50 network from the paper:\n# \"Deep Residual Learning for Image Recognition\"\n# http://arxiv.org/pdf/1512.03385v1.pdf\n# License: see https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE\n\n# Download pretrained weights from:\n# https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/resnet50.pkl\n\nimport lasagne\nfrom lasagne.utils import floatX\nfrom lasagne.layers import InputLayer\nfrom lasagne.layers import Conv2DLayer as ConvLayer\nfrom lasagne.layers import BatchNormLayer\nfrom lasagne.layers import Pool2DLayer as PoolLayer\nfrom lasagne.layers import NonlinearityLayer\nfrom lasagne.layers import ElemwiseSumLayer\nfrom lasagne.layers import DenseLayer\nfrom lasagne.nonlinearities import rectify, softmax, identity\n\n\ndef build_simple_block(incoming_layer, names,\n num_filters, filter_size, stride, pad,\n use_bias=False, nonlin=rectify):\n \"\"\"Creates stacked Lasagne layers ConvLayer -> BN -> (ReLu)\n\n Parameters:\n ----------\n incoming_layer : instance of Lasagne layer\n Parent layer\n\n names : list of string\n Names of the layers in block\n\n num_filters : int\n Number of filters in convolution layer\n\n filter_size : int\n Size of filters in convolution layer\n\n stride : int\n Stride of convolution layer\n\n pad : int\n Padding of convolution layer\n\n use_bias : bool\n Whether to use bias in conlovution layer\n\n nonlin : function\n Nonlinearity type of Nonlinearity layer\n\n Returns\n -------\n tuple: (net, last_layer_name)\n net : dict\n Dictionary with stacked layers\n last_layer_name : string\n Last layer name\n \"\"\"\n net = []\n net.append((\n names[0],\n ConvLayer(incoming_layer, num_filters, filter_size, pad, stride,\n flip_filters=False, nonlinearity=None) if use_bias\n else ConvLayer(incoming_layer, num_filters, filter_size, stride, pad, b=None,\n flip_filters=False, nonlinearity=None)\n ))\n\n net.append((\n names[1],\n BatchNormLayer(net[-1][1])\n ))\n if nonlin is not None:\n net.append((\n names[2],\n NonlinearityLayer(net[-1][1], nonlinearity=nonlin)\n ))\n\n return dict(net), net[-1][0]\n\n\ndef build_residual_block(incoming_layer, ratio_n_filter=1.0, ratio_size=1.0, has_left_branch=False,\n upscale_factor=4, ix=''):\n \"\"\"Creates two-branch residual block\n\n Parameters:\n ----------\n incoming_layer : instance of Lasagne layer\n Parent layer\n\n ratio_n_filter : float\n Scale factor of filter bank at the input of residual block\n\n ratio_size : float\n Scale factor of filter size\n\n has_left_branch : bool\n if True, then left branch contains simple block\n\n upscale_factor : float\n Scale factor of filter bank at the output of residual block\n\n ix : int\n Id of residual block\n\n Returns\n -------\n tuple: (net, last_layer_name)\n net : dict\n Dictionary with stacked layers\n last_layer_name : string\n Last layer name\n \"\"\"\n simple_block_name_pattern = ['res%s_branch%i%s', 'bn%s_branch%i%s', 'res%s_branch%i%s_relu']\n\n net = {}\n\n # right branch\n net_tmp, last_layer_name = build_simple_block(\n incoming_layer, map(lambda s: s % (ix, 2, 'a'), 
simple_block_name_pattern),\n int(lasagne.layers.get_output_shape(incoming_layer)[1]*ratio_n_filter), 1, int(1.0/ratio_size), 0)\n net.update(net_tmp)\n\n net_tmp, last_layer_name = build_simple_block(\n net[last_layer_name], map(lambda s: s % (ix, 2, 'b'), simple_block_name_pattern),\n lasagne.layers.get_output_shape(net[last_layer_name])[1], 3, 1, 1)\n net.update(net_tmp)\n\n net_tmp, last_layer_name = build_simple_block(\n net[last_layer_name], map(lambda s: s % (ix, 2, 'c'), simple_block_name_pattern),\n lasagne.layers.get_output_shape(net[last_layer_name])[1]*upscale_factor, 1, 1, 0,\n nonlin=None)\n net.update(net_tmp)\n\n right_tail = net[last_layer_name]\n left_tail = incoming_layer\n\n # left branch\n if has_left_branch:\n net_tmp, last_layer_name = build_simple_block(\n incoming_layer, map(lambda s: s % (ix, 1, ''), simple_block_name_pattern),\n int(lasagne.layers.get_output_shape(incoming_layer)[1]*4*ratio_n_filter), 1, int(1.0/ratio_size), 0,\n nonlin=None)\n net.update(net_tmp)\n left_tail = net[last_layer_name]\n\n net['res%s' % ix] = ElemwiseSumLayer([left_tail, right_tail], coeffs=1)\n net['res%s_relu' % ix] = NonlinearityLayer(net['res%s' % ix], nonlinearity=rectify)\n\n return net, 'res%s_relu' % ix\n\n\ndef build_network_resnet50(input, nbClasses):\n net = {}\n net['input'] = InputLayer(shape=(None, 1, 120, 120),input_var=input)\n sub_net, parent_layer_name = build_simple_block(\n net['input'], ['conv1', 'bn_conv1', 'conv1_relu'],\n 64, 7, 3, 2, use_bias=True)\n net.update(sub_net)\n net['pool1'] = PoolLayer(net[parent_layer_name], pool_size=3, stride=2, pad=0, mode='max', ignore_border=False)\n block_size = list('abc')\n parent_layer_name = 'pool1'\n for c in block_size:\n if c == 'a':\n sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1, 1, True, 4, ix='2%s' % c)\n else:\n sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='2%s' % c)\n net.update(sub_net)\n\n block_size = list('abcd')\n for c in block_size:\n if c == 'a':\n sub_net, parent_layer_name = build_residual_block(\n net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='3%s' % c)\n else:\n sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='3%s' % c)\n net.update(sub_net)\n\n block_size = list('abcdef')\n for c in block_size:\n if c == 'a':\n sub_net, parent_layer_name = build_residual_block(\n net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='4%s' % c)\n else:\n sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='4%s' % c)\n net.update(sub_net)\n\n block_size = list('abc')\n for c in block_size:\n if c == 'a':\n sub_net, parent_layer_name = build_residual_block(\n net[parent_layer_name], 1.0/2, 1.0/2, True, 4, ix='5%s' % c)\n else:\n sub_net, parent_layer_name = build_residual_block(net[parent_layer_name], 1.0/4, 1, False, 4, ix='5%s' % c)\n net.update(sub_net)\n net['pool5'] = PoolLayer(net[parent_layer_name], pool_size=7, stride=1, pad=0,\n mode='average_exc_pad', ignore_border=False)\n net['fc1000'] = DenseLayer(net['pool5'], num_units=nbClasses, nonlinearity=None) # number output units = nbClasses (global variable)\n net['prob'] = NonlinearityLayer(net['fc1000'], nonlinearity=softmax)\n\n return net\n\n# network from Oxford & Google BBC paper\ndef build_network_google (activation, alpha, epsilon, input, nbClasses):\n # input\n cnn = lasagne.layers.InputLayer(\n shape=(None, 1, 120, 120), # 5,120,120 (5 = #frames)\n input_var=input)\n # conv 1\n 
cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=128,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # conv 2\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=256,\n filter_size=(3, 3),\n stride=(2, 2),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # conv3\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # conv 4\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # conv 5\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # FC layer\n #cnn = lasagne.layers.DenseLayer(\n # cnn,\n # nonlinearity=lasagne.nonlinearities.identity,\n # num_units=128)\n # \n # cnn = lasagne.layers.NonlinearityLayer(\n # cnn,\n # nonlinearity=activation)\n \n cnn = lasagne.layers.DenseLayer(\n cnn,\n nonlinearity=lasagne.nonlinearities.softmax,\n num_units=nbClasses)\n\n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n \n return cnn\n\n\n# default network for cifar10\ndef build_network_cifar10 (activation, alpha, epsilon, input, nbClasses):\n cnn = lasagne.layers.InputLayer(\n shape=(None, 1, 120, 120),\n input_var=input)\n \n # 128C3-128C3-P2\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=128,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=128,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # 256C3-256C3-P2\n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=256,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=256,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # 512C3-512C3-P2\n cnn = 
lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = lasagne.layers.Conv2DLayer(\n cnn,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # print(cnn.output_shape)\n \n # # 1024FP-1024FP-10FP\n # cnn = lasagne.layers.DenseLayer(\n # cnn,\n # nonlinearity=lasagne.nonlinearities.identity,\n # num_units=1024)\n #\n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n #\n # cnn = lasagne.layers.NonlinearityLayer(\n # cnn,\n # nonlinearity=activation)\n \n # cnn = lasagne.layers.DenseLayer(\n # cnn,\n # nonlinearity=lasagne.nonlinearities.identity,\n # num_units=1024)\n #\n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n #\n # cnn = lasagne.layers.NonlinearityLayer(\n # cnn,\n # nonlinearity=activation)\n \n cnn = lasagne.layers.DenseLayer(\n cnn,\n nonlinearity=lasagne.nonlinearities.softmax,\n num_units=nbClasses)\n \n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n \n return cnn\n\n\n################## BINARY NETWORKS ###################\n\nimport time\nfrom collections import OrderedDict\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\n\nimport lasagne\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom theano.scalar.basic import UnaryScalarOp, same_out_nocomplex\nfrom theano.tensor.elemwise import Elemwise\n\n# Our own rounding function, that does not set the gradient to 0 like Theano's\nclass Round3(UnaryScalarOp):\n def c_code (self, node, name, (x, ), (z, ), sub):\n return \"%(z)s = round(%(x)s);\" % locals()\n \n def grad (self, inputs, gout):\n (gz,) = gout\n return gz,\n\n\nround3_scalar = Round3(same_out_nocomplex, name='round3')\nround3 = Elemwise(round3_scalar)\n\n\ndef hard_sigmoid (x):\n return T.clip((x + 1.) / 2., 0, 1)\n\n\n# The neurons' activations binarization function\n# It behaves like the sign function during forward propagation\n# And like:\n# hard_tanh(x) = 2*hard_sigmoid(x)-1\n# during back propagation\ndef binary_tanh_unit (x):\n return 2. 
* round3(hard_sigmoid(x)) - 1.\n\n\ndef binary_sigmoid_unit (x):\n return round3(hard_sigmoid(x))\n\n# The weights' binarization function,\n# taken directly from the BinaryConnect github repository\n# (which was made available by his authors)\ndef binarization (W, H, binary=True, deterministic=False, stochastic=False, srng=None):\n # (deterministic == True) <-> test-time <-> inference-time\n if not binary or (deterministic and stochastic):\n # print(\"not binary\")\n Wb = W\n \n else:\n \n # [-1,1] -> [0,1]\n Wb = hard_sigmoid(W / H)\n # Wb = T.clip(W/H,-1,1)\n \n # Stochastic BinaryConnect\n if stochastic:\n \n # print(\"stoch\")\n Wb = T.cast(srng.binomial(n=1, p=Wb, size=T.shape(Wb)), theano.config.floatX)\n \n # Deterministic BinaryConnect (round to nearest)\n else:\n # print(\"det\")\n Wb = T.round(Wb)\n \n # 0 or 1 -> -1 or 1\n Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)\n \n return Wb\n\n\n# This class extends the Lasagne DenseLayer to support BinaryConnect\nclass binary_DenseLayer(lasagne.layers.DenseLayer):\n def __init__ (self, incoming, num_units,\n binary=True, stochastic=True, H=1., W_LR_scale=\"Glorot\", **kwargs):\n \n self.binary = binary\n self.stochastic = stochastic\n \n self.H = H\n if H == \"Glorot\":\n num_inputs = int(np.prod(incoming.output_shape[1:]))\n self.H = np.float32(np.sqrt(1.5 / (num_inputs + num_units)))\n # print(\"H = \"+str(self.H))\n \n self.W_LR_scale = W_LR_scale\n if W_LR_scale == \"Glorot\":\n num_inputs = int(np.prod(incoming.output_shape[1:]))\n self.W_LR_scale = np.float32(1. / np.sqrt(1.5 / (num_inputs + num_units)))\n \n self._srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))\n \n if self.binary:\n super(binary_DenseLayer, self).__init__(incoming, num_units, W=lasagne.init.Uniform((-self.H, self.H)), **kwargs)\n # add the binary tag to weights\n self.params[self.W] = set(['binary'])\n \n else:\n super(binary_DenseLayer, self).__init__(incoming, num_units, **kwargs)\n \n def get_output_for (self, input, deterministic=False, **kwargs):\n \n self.Wb = binarization(self.W, self.H, self.binary, deterministic, self.stochastic, self._srng)\n Wr = self.W\n self.W = self.Wb\n \n rvalue = super(binary_DenseLayer, self).get_output_for(input, **kwargs)\n \n self.W = Wr\n \n return rvalue\n\n\n# This class extends the Lasagne Conv2DLayer to support BinaryConnect\nclass binary_Conv2DLayer(lasagne.layers.Conv2DLayer):\n def __init__ (self, incoming, num_filters, filter_size,\n binary=True, stochastic=True, H=1., W_LR_scale=\"Glorot\", **kwargs):\n \n self.binary = binary\n self.stochastic = stochastic\n \n self.H = H\n if H == \"Glorot\":\n num_inputs = int(np.prod(filter_size) * incoming.output_shape[1])\n num_units = int(\n np.prod(filter_size) * num_filters) # theoretically, I should divide num_units by the pool_shape\n self.H = np.float32(np.sqrt(1.5 / (num_inputs + num_units)))\n # print(\"H = \"+str(self.H))\n \n self.W_LR_scale = W_LR_scale\n if W_LR_scale == \"Glorot\":\n num_inputs = int(np.prod(filter_size) * incoming.output_shape[1])\n num_units = int(\n np.prod(filter_size) * num_filters) # theoretically, I should divide num_units by the pool_shape\n self.W_LR_scale = np.float32(1. 
/ np.sqrt(1.5 / (num_inputs + num_units)))\n # print(\"W_LR_scale = \"+str(self.W_LR_scale))\n \n self._srng = RandomStreams(lasagne.random.get_rng().randint(1, 2147462579))\n \n if self.binary:\n super(binary_Conv2DLayer, self).__init__(incoming, num_filters, filter_size,\n W=lasagne.init.Uniform((-self.H, self.H)), **kwargs)\n # add the binary tag to weights\n self.params[self.W] = set(['binary'])\n else:\n super(binary_Conv2DLayer, self).__init__(incoming, num_filters, filter_size, **kwargs)\n \n def convolve (self, input, deterministic=False, **kwargs):\n \n self.Wb = binarization(self.W, self.H, self.binary, deterministic, self.stochastic, self._srng)\n Wr = self.W\n self.W = self.Wb\n \n rvalue = super(binary_Conv2DLayer, self).convolve(input, **kwargs)\n \n self.W = Wr\n \n return rvalue\n \n \ndef build_network_google_binary (activation, alpha, epsilon, input, binary, stochastic, H, W_LR_scale):\n # input\n cnn = lasagne.layers.InputLayer(\n shape=(None, 1, 120, 120), # 5,120,120 (5 = #frames)\n input_var=input)\n # conv 1\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=128,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n\n # conv 2\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=256,\n filter_size=(3, 3),\n stride=(2, 2),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n\n # conv3\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n\n # conv 4\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n\n # conv 5\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n\n # FC layer\n cnn = binary_net.DenseLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n nonlinearity=lasagne.nonlinearities.identity,\n num_units=39)\n \n #cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n \n return cnn\n\ndef build_network_cifar10_binary (activation, alpha, epsilon, input, binary, stochastic, H, W_LR_scale):\n cnn = lasagne.layers.InputLayer(\n shape=(None, 1, 120, 120),\n input_var=input)\n \n # 128C3-128C3-P2\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=128,\n filter_size=(3, 
3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=128,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # 256C3-256C3-P2\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=256,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=256,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # 512C3-512C3-P2\n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n cnn = binary_net.Conv2DLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n num_filters=512,\n filter_size=(3, 3),\n pad=1,\n nonlinearity=lasagne.nonlinearities.identity)\n \n cnn = lasagne.layers.MaxPool2DLayer(cnn, pool_size=(2, 2))\n \n cnn = lasagne.layers.BatchNormLayer(\n cnn,\n epsilon=epsilon,\n alpha=alpha)\n \n cnn = lasagne.layers.NonlinearityLayer(\n cnn,\n nonlinearity=activation)\n \n # print(cnn.output_shape)\n \n # # 1024FP-1024FP-10FP\n # cnn = binary_net.DenseLayer(\n # cnn,\n # binary=binary,\n # stochastic=stochastic,\n # H=H,\n # W_LR_scale=W_LR_scale,\n # nonlinearity=lasagne.nonlinearities.identity,\n # num_units=1024)\n #\n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n #\n # cnn = lasagne.layers.NonlinearityLayer(\n # cnn,\n # nonlinearity=activation)\n \n # cnn = binary_net.DenseLayer(\n # cnn,\n # binary=binary,\n # stochastic=stochastic,\n # H=H,\n # W_LR_scale=W_LR_scale,\n # nonlinearity=lasagne.nonlinearities.identity,\n # num_units=1024)\n #\n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n #\n # cnn = lasagne.layers.NonlinearityLayer(\n # cnn,\n # nonlinearity=activation)\n \n cnn = binary_net.DenseLayer(\n cnn,\n binary=binary,\n stochastic=stochastic,\n H=H,\n W_LR_scale=W_LR_scale,\n nonlinearity=lasagne.nonlinearities.identity,\n num_units=39)\n \n # cnn = lasagne.layers.BatchNormLayer(\n # cnn,\n # epsilon=epsilon,\n # alpha=alpha)\n \n return cnn\n" }, { "alpha_fraction": 0.60317462682724, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 18.384614944458008, 
"blob_id": "0450f75c33317c1dbea44421639fbaf99964edd9", "content_id": "a55400e1e9ae0798e107a469247aa29ae06ae179", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "permissive", "max_line_length": 48, "num_lines": 13, "path": "/code/Experiments/Tutorials/EbenOlsen_TheanoLasagne/1 - Theano Basics/fibonnacci.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import numpy as np\nimport theano\nimport theano.tensor as T\n\n# Fn=Fn−2+Fn−1, with F1=1 and F2=1.\n\nf1 = theano.shared(1)\nf2 = theano.shared(1)\n\nf = f1 + f2\nupdates= {a:b; b:f}\nnext = theano.function([f1,f2],f,update=updates)\n[next() for i in range(0,3)]\n" }, { "alpha_fraction": 0.5487387180328369, "alphanum_fraction": 0.5845531225204468, "avg_line_length": 35.078651428222656, "blob_id": "6a6b401e531b98ef0ce30caf078d6c852af2af55", "content_id": "af5e280c7b1a93471ed3f6818272f757da129497", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3211, "license_type": "permissive", "max_line_length": 102, "num_lines": 89, "path": "/code/audioSR/Spoken-language-identification-master/ensembling/get_output_layers.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" Usage: python get_output_layers.py test|val\n\"\"\"\nimport sys\nimport caffe\nimport numpy as np\n\ncaffe.set_mode_gpu()\n\ndeploy = '../prototxt/deploy.augm_32r-2-64r-2-64r-2-128r-2-128r-2-256r-2-1024rd0.3-1024rd0.3.prototxt'\nmodel = 'augm_dropout0.3_on_augm84K-lr0.01_30K_iter_75000'\nmodel_path = '../models/' + model + '.caffemodel'\n\n\"\"\"\n####################### networks with no augmentation ##########################\nnet = caffe.Classifier(deploy, model_path)\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2, 0, 1))\nnet.blobs['data'].reshape(1, 1, 256, 858)\n\nfolder = '/home/brainstorm/caffe/Data/mnt/3/language/train/png/'\ncnt = 12320\nfile = open('../valEqual.csv', 'r')\nprob_file = open('probs/val/' + model + '.csv', 'w')\n\nfor iter in range(cnt):\n name = file.readline().split(',')[0]\n net.blobs['data'].data[...] 
= transformer.preprocess('data', \n caffe.io.load_image(folder + name + '.png', color=False))\n probs = net.forward()['loss'][0]\n probs = [str(x) for x in probs]\n prob_file.write(','.join(probs) + '\\n')\n \n if (iter % 100 == 0):\n print \"processed %d images\" % (iter + 1)\n\"\"\"\n\n######################### networks with augmentation ###########################\nassert sys.argv[1] in ('test', 'val')\ndataset = sys.argv[1]\naugm_cnt = 20\ncnt = 12320\n\nif (dataset == 'val'):\n folder = '/home/brainstorm/caffe/Data/mnt/3/language/train/pngaugm/'\n file = open('../valEqual.csv', 'r')\nelse:\n folder = '../test/pngaugm/'\n file = open('../testingData.csv', 'r')\n\n# sum - mean of augm_cnt versions of speech\n# log - mean of logs of augm_cnt versions of speech\n# dense - last dense layer, 1024 outputs\nprob_file_sum = open('probs/' + dataset + '/' + model + '.sum' + str(augm_cnt) + '.csv', 'w')\nprob_file_log = open('probs/' + dataset + '/' + model + '.log' + str(augm_cnt) + '.csv', 'w')\ndense_file = open('probs/' + dataset + '/'+ model + '.dense' + str(augm_cnt) + '.csv', 'w')\n\nnet = caffe.Classifier(deploy, model_path)\ntransformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})\ntransformer.set_transpose('data', (2, 0, 1))\n\nnet.blobs['data'].reshape(augm_cnt, 1, 256, 768)\nfor iter in range(cnt):\n if (dataset == 'val'):\n name = file.readline().split(',')[0]\n else:\n name = file.readline().strip()[:-4]\n X = np.zeros((augm_cnt, 1, 256, 768), dtype=np.float32)\n for index in range(augm_cnt):\n augm_path = folder + name + '.' + str(index) + '.png'\n X[index] = transformer.preprocess('data', caffe.io.load_image(augm_path, color=False))\n\n net.blobs['data'].data[...] = X\n out = net.forward()['loss']\n probs_sum = out.mean(axis=0)\n probs_log = np.log(out + 1e-7).mean(axis=0)\n dense = net.blobs['ip2new'].data\n \n probs_sum = [str(x) for x in probs_sum]\n prob_file_sum.write(','.join(probs_sum) + '\\n')\n \n probs_log = [\"%f\" % x for x in probs_log]\n prob_file_log.write(','.join(probs_log) + '\\n')\n \n for index in range(augm_cnt):\n tmp = [str(x) for x in dense[index]]\n dense_file.write(','.join(tmp) + '\\n')\n \n if (iter % 10 == 0):\n print \"processed %d images\" % (iter + 1)\n" }, { "alpha_fraction": 0.7436589598655701, "alphanum_fraction": 0.7584997415542603, "avg_line_length": 40.18888854980469, "blob_id": "cceadf943d088c9f55f15bca3c4471f54169d2f4", "content_id": "967f8944715a1a6bbd2f816c464e901901d91019", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3706, "license_type": "permissive", "max_line_length": 165, "num_lines": 90, "path": "/code/Experiments/Caffe/compile_caffe_ubuntu_14.04.sh", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# This script installs Caffe and pycaffe on Ubuntu 14.04 x64 or 14.10 x64. CPU only, multi-threaded Caffe.\n# Usage: \n# 0. Set up here how many cores you want to use during the installation:\n# By default Caffe will use all these cores.\nNUMBER_OF_CORES=8\n# 1. Execute this script, e.g. \"bash compile_caffe_ubuntu_14.04.sh\" (~30 to 60 minutes on a new Ubuntu).\n# 2. Open a new shell (or run \"source ~/.bash_profile\"). You're done. 
You can try \n# running \"import caffe\" from the Python interpreter to test.\n\n#http://caffe.berkeleyvision.org/install_apt.html : (general install info: http://caffe.berkeleyvision.org/installation.html)\ncd\nsudo apt-get update\n#sudo apt-get upgrade -y # If you are OK getting prompted\nsudo DEBIAN_FRONTEND=noninteractive apt-get upgrade -y -q -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" # If you are OK with all defaults\n\nsudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev\nsudo apt-get install -y --no-install-recommends libboost-all-dev\nsudo apt-get install -y libatlas-base-dev \nsudo apt-get install -y python-dev \nsudo apt-get install -y python-pip git\n\n# For Ubuntu 14.04\nsudo apt-get install -y libgflags-dev libgoogle-glog-dev liblmdb-dev protobuf-compiler \n\n# LMDB\n# https://github.com/BVLC/caffe/issues/2729: Temporarily broken link to the LMDB repository #2729\n#git clone https://gitorious.org/mdb/mdb.git\n#cd mdb/libraries/liblmdb\n#make && make install \n\ngit clone https://github.com/LMDB/lmdb.git \ncd lmdb/libraries/liblmdb\nsudo make \nsudo make install\n\n# More pre-requisites \nsudo apt-get install -y cmake unzip doxygen\nsudo apt-get install -y protobuf-compiler\nsudo apt-get install -y libffi-dev python-dev build-essential\nsudo pip install lmdb\nsudo pip install numpy\nsudo apt-get install -y python-numpy\nsudo apt-get install -y gfortran # required by scipy\nsudo pip install scipy # required by scikit-image\nsudo apt-get install -y python-scipy # in case pip failed\nsudo apt-get install -y python-nose\nsudo pip install scikit-image # to fix https://github.com/BVLC/caffe/issues/50\n\n\n# Get caffe (http://caffe.berkeleyvision.org/installation.html#compilation)\ncd\nmkdir caffe\ncd caffe\nwget https://github.com/BVLC/caffe/archive/master.zip\nunzip -o master.zip\ncd caffe-master\n\n# Prepare Python binding (pycaffe)\ncd python\nfor req in $(cat requirements.txt); do sudo pip install $req; done\necho \"export PYTHONPATH=$(pwd):$PYTHONPATH \" >> ~/.bash_profile # to be able to call \"import caffe\" from Python after reboot\nsource ~/.bash_profile # Update shell \ncd ..\n\n# Compile caffe and pycaffe\ncp Makefile.config.example Makefile.config\nsed -i '8s/.*/CPU_ONLY := 1/' Makefile.config # Line 8: CPU only\nsudo apt-get install -y libopenblas-dev\nsed -i '33s/.*/BLAS := open/' Makefile.config # Line 33: to use OpenBLAS\n# Note that if one day the Makefile.config changes and these line numbers change, we're screwed\n# Maybe it would be best to simply append those changes at the end of Makefile.config \necho \"export OPENBLAS_NUM_THREADS=($NUMBER_OF_CORES)\" >> ~/.bash_profile \nmkdir build\ncd build\ncmake ..\ncd ..\nmake all -j$NUMBER_OF_CORES # 4 is the number of parallel threads for compilation: typically equal to number of physical cores\nmake pycaffe -j$NUMBER_OF_CORES\nmake test\nmake runtest\n#make matcaffe\nmake distribute\n\n# Bonus for other work with pycaffe\nsudo pip install pydot\nsudo apt-get install -y graphviz\nsudo pip install scikit-learn\n\n# At the end, you need to run \"source ~/.bash_profile\" manually or start a new shell to be able to do 'python import caffe', \n# because one cannot source in a bash script. 
(http://stackoverflow.com/questions/16011245/source-files-in-a-bash-script)" }, { "alpha_fraction": 0.6127143502235413, "alphanum_fraction": 0.6357172727584839, "avg_line_length": 33.82038879394531, "blob_id": "5ac6d9eead6d0b4f83d7e324c18a33401918bcd2", "content_id": "05625ba25e148e1e94f05fd3bf1f97b8a70a43fd", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7173, "license_type": "permissive", "max_line_length": 139, "num_lines": 206, "path": "/code/Experiments/Lasagne_examples/examples/ResNets/resnet50/resnet50_evaluateNetwork.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import urllib\nimport io\nimport skimage.transform\nimport matplotlib\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nplt.rcParams['figure.figsize'] = 8, 6\nimport argparse\nimport time\nimport pickle\nimport numpy as np\nimport theano\nimport lasagne\nimport os\n\n\nparser = argparse.ArgumentParser(description=\"Getting top 5 classes of images\")\n\nadd_arg = parser.add_argument\n\nadd_arg(\"-i\", \"--input_image\", help=\"Input image\")\nadd_arg(\"-m\", \"--model_file\", help=\"Model pickle file\")\n\nargs = parser.parse_args()\n\n# this imported file contains build_model(), which constructs the network structure that you van fill using the pkl file\n# to generate the pkl file, you need to run the main function in resnet50CaffeToLasagne_ImageNet,\n# which populates the network from caffe, gets the classes and the mean image, and stores those in a pkl file\nfrom resnet50LasagneModel import *\n\ndef load_model(model_pkl_file):\n if not os.path.exists(model_pkl_file): print(\"This pkl file does not exist! Please run 'resnet50CaffeToLasagne' first to generate it.\")\n model = pickle.load(open(model_pkl_file,'rb'))\n net = build_model()\n lasagne.layers.set_all_param_values(net['prob'], model['values'])\n return net, model['mean_image'], model['synset_words']\n\ndef prep_image(fname, mean_values):\n t0 = time.time()\n ext = fname.split('.')[-1]\n im = plt.imread(fname, ext)\n h, w, _ = im.shape\n if h < w:\n im = skimage.transform.resize(im, (256, w*256/h), preserve_range=True)\n else:\n im = skimage.transform.resize(im, (h*256/w, 256), preserve_range=True)\n h, w, _ = im.shape\n im = im[h//2-112:h//2+112, w//2-112:w//2+112]\n # h, w, _ = im.shape\n # im = skimage.transform.resize(im, (224, 224), preserve_range=True)\n h, w, _ = im.shape\n rawim = np.copy(im).astype('uint8')\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n im = im[::-1, :, :]\n im = im - mean_values\n t1 = time.time()\n print \"Time taken in preparing the image : {}\".format(t1 - t0)\n return rawim, im[np.newaxis].astype('float32')\n\ndef get_net_fun(pkl_model):\n\tnet, mean_img, synset_words = load_model(pkl_model)\n\n\tget_class_prob = theano.function([net['input'].input_var], lasagne.layers.get_output(net['prob'],deterministic=True))\n\n\tdef print_top5(im_path):\n\t\traw_im, im = prep_image(im_path, mean_img)\n\t\tprob = get_class_prob(im)[0]\n\t\tres = sorted(zip(synset_words, prob), key=lambda t: t[1], reverse=True)[:5]\n\t\tfor c, p in res:\n\t\t\tprint ' ', c, p\n\n\treturn get_class_prob, print_top5\n\ndef get_feature_extractor(pkl_model, layer_name):\n\tnet, mean_img, synset_words = load_model(pkl_model)\n\tlayer_output = theano.function([net['input'].input_var], lasagne.layers.get_output(net[layer_name],deterministic=True))\n\n\tdef feature_extractor(im_path):\n\t\traw_im, im = prep_image(im_path, mean_img)\n\t\treturn 
layer_output(im)[0]\n\n\treturn feature_extractor\n\n\n# The following functions are just for testing out (with ImageNet examples).\ndef download_images (url='http://www.image-net.org/challenges/LSVRC/2012/ori_urls/indexval.html'):\n # Read ImageNet synset\n \n # Download some image urls for recognition\n print(\"getting urls...\")\n index = urllib.urlopen(url).read()\n image_urls = index.split('<br>')\n np.random.seed(23)\n np.random.shuffle(image_urls)\n image_urls = image_urls[:10] # used to be 100\n \n return image_urls\n\n# Lets take five images and compare prediction of Lasagne with Caffe\ndef test_lasagne_ImageNet (classes, image_urls, mean_values, net, net_caffe):\n n = 5\n m = 5\n i = 0\n for url in image_urls:\n print url ### print url to show progress ###\n # try:\n rawim, im = prep_image(url, mean_values)\n # except:\n # print 'Failed to download'\n # continue\n \n prob_lasangne = np.array(lasagne.layers.get_output(net['prob'], im, deterministic=True).eval())[0]\n prob_caffe = net_caffe.forward_all(data=im)['prob'][0]\n \n print 'Lasagne:'\n res = sorted(zip(classes, prob_lasangne), key=lambda t: t[1], reverse=True)[:n]\n for c, p in res:\n print ' ', c, p\n \n print 'Caffe:'\n res = sorted(zip(classes, prob_caffe), key=lambda t: t[1], reverse=True)[:n]\n for c, p in res:\n print ' ', c, p\n \n plt.figure()\n plt.imshow(rawim.astype('uint8'))\n plt.axis('off')\n plt.show()\n \n i += 1\n if i == m:\n break\n \n print '\\n\\n'\n \n# Image loader\ndef prep_image (url, mean_values, fname=None):\n if fname is None:\n ext = url.split('.')[-1]\n im = plt.imread(io.BytesIO(urllib.urlopen(url).read()), ext)\n else:\n ext = fname.split('.')[-1]\n im = plt.imread(fname, ext)\n h, w, _ = im.shape\n if h < w:\n im = skimage.transform.resize(im, (256, w * 256 / h), preserve_range=True)\n else:\n im = skimage.transform.resize(im, (h * 256 / w, 256), preserve_range=True)\n h, w, _ = im.shape\n im = im[h // 2 - 112:h // 2 + 112, w // 2 - 112:w // 2 + 112]\n rawim = np.copy(im).astype('uint8')\n im = np.swapaxes(np.swapaxes(im, 1, 2), 0, 1)\n im = im[::-1, :, :]\n im = im - mean_values\n return rawim, floatX(im[np.newaxis])\n\n\nif __name__ == \"__main__\":\n\tprint \"Compiling functions...\"\n\tget_prob, print_top5 = get_net_fun(args.model_file) # expects pkl model\n\tt0 = time.clock()\n\tprint_top5(args.input_image)\n\tt1 = time.clock()\n\tprint(\"Total time taken {:.4f}\".format(t1 - t0))\n\n\t# print \"Compiling function for getting conv1 ....\"\n\t# feature_extractor = get_feature_extractor(args.model_file, 'conv1')\n\t# t0 = time.time()\n\t# print feature_extractor(args.input_image).shape\n\t# t1 = time.time()\n\t# print(\"Total time taken {:.4f}\".format(t1 - t0))\n #\n\t# print \"Compiling function for getting res2c ....\"\n\t# feature_extractor = get_feature_extractor(args.model_file, 'res2c')\n\t# t0 = time.time()\n\t# print feature_extractor(args.input_image).shape\n\t# t1 = time.time()\n\t# print(\"Total time taken {:.4f}\".format(t1 - t0))\n #\n\t# print \"Compiling function for getting res3d ....\"\n\t# feature_extractor = get_feature_extractor(args.model_file, 'res3d')\n\t# t0 = time.time()\n\t# print feature_extractor(args.input_image).shape\n\t# t1 = time.time()\n\t# print(\"Total time taken {:.4f}\".format(t1 - t0))\n #\n\t# print \"Compiling function for getting conv res4f ....\"\n\t# feature_extractor = get_feature_extractor(args.model_file, 'res4f')\n\t# t0 = time.time()\n\t# print feature_extractor(args.input_image).shape\n\t# t1 = time.time()\n\t# print(\"Total time taken 
{:.4f}\".format(t1 - t0))\n # \n\t# print \"Compiling function for getting conv res5c ....\"\n\t# feature_extractor = get_feature_extractor(args.model_file, 'res5c')\n\t# t0 = time.time()\n\t# print feature_extractor(args.input_image).shape\n\t# t1 = time.time()\n\t# print(\"Total time taken {:.4f}\".format(\n \n \n \n# Usage examples\n# first, generate the pkl model: 'python resnet50CaffeToLasagne.py'\n# then, evaluate the model: 'python resnet50_evaluateNetwork.py -i indianElephant.jpeg -m resnet50imageNet.pkl'\n# -> this gives the 5 most probable classes of the image 'indianElephant.jpeg'\n" }, { "alpha_fraction": 0.736952006816864, "alphanum_fraction": 0.7411273717880249, "avg_line_length": 35.92307662963867, "blob_id": "1ff943fb088acb037e4ca2e10cc62c179dbc0b3e", "content_id": "f9435d909502ee47c0c4f2dfad2b99b0e8becd19", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 479, "license_type": "permissive", "max_line_length": 175, "num_lines": 13, "path": "/code/audioSR/readData.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import os\nimport sys\n\ndef unpickle(file):\n import cPickle\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\nprint(unpickle(os.path.expanduser('/home/matthijs/Documents/Dropbox/_MyDocs/_ku_leuven/Master_2/Thesis/convNets/code/audioSR/KGP-ASR/TIMIT_Alphabet.pkl')))\n\nprint(unpickle(os.path.expanduser('/home/matthijs/Documents/Dropbox/_MyDocs/_ku_leuven/Master_2/Thesis/convNets/code/audioSR/KGP-ASR/TIMIT_data_prepared_for_CTC.pkl')).keys())" }, { "alpha_fraction": 0.5406796932220459, "alphanum_fraction": 0.5555098056793213, "avg_line_length": 32.24657440185547, "blob_id": "14def0047b34f64d12cb1bdef841448202d619ee", "content_id": "48c240a7e61480d0db54e58b695a9cc0eb663df6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4855, "license_type": "permissive", "max_line_length": 113, "num_lines": 146, "path": "/code/audioSR/Spoken-language-identification-master/theano/plot.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys\nimport argparse\nimport os\n\n\n#parsing arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--plot', type=str, default='plot.png', help='plotfile name with .png')\nparser.add_argument('--log', type=str, default='log.txt', help='log file name')\nparser.add_argument('--winVal', type=int, default='200', help='window for Val')\nparser.add_argument('--winTrain', type=int, default='200', help='window for Train')\nparser.add_argument('--no-legend', dest='legend', action='store_false')\nparser.add_argument('--no-accuracy', dest='accuracy', action='store_false')\nparser.add_argument('--no-loss', dest='loss', action='store_false')\nparser.add_argument('--start_epoch', type=float, default=-1.0, help='start plotting from that epoch')\nparser.set_defaults(loss=True)\nparser.set_defaults(legend=True)\nparser.set_defaults(accuracy=True)\n\nargs = parser.parse_args()\n\nplotname = args.plot\nwindowVal = args.winVal\nwindowTrain = args.winTrain\naccuracy = []\n\n\ndef movingAverage(loss, window):\n mas = []\n for i in range(len(loss)):\n j = i - window + 1\n if (j < 0):\n j = 0\n sum = 0.0\n for k in range(window):\n sum += loss[j + k]\n mas.append(sum / window)\n return mas\n\n\ndef plotTrainVal(filename, index, plotLabel):\n 
valx = []\n valy = []\n trainx = []\n trainy = []\n train_accuracyx = []\n train_accuracyy = []\n val_accuracyx = []\n val_accuracyy = []\n \n with open(filename, 'r') as logfile: \n for st in logfile.readlines():\n head = st.split('\\t')[0].strip()\n\n if (head[:7] == 'testing' or head[:8] == 'training'):\n iteration_expr = head[head.find(':')+1:]\n divpos = iteration_expr.find('/')\n first = iteration_expr[:divpos]\n iterations_per_epoch = float(iteration_expr[divpos+1:])\n dotpos = first.find('.')\n epoch = float(first[:dotpos])\n iteration = float(first[dotpos+1:])\n x = epoch + iteration / iterations_per_epoch\n \n st_loss = st[st.find(\"avg_loss\"):]\n cur_loss = float(st_loss[st_loss.find(':')+1:st_loss.find('\\t')])\n \n if (head[:7] == 'testing'):\n valx.append(x)\n valy.append(cur_loss)\n else:\n trainx.append(x)\n trainy.append(cur_loss)\n \n if st.strip()[:8] == \"accuracy\":\n cur_accuracy = float(st[st.find(':')+1:st.find(\"percent\")]) / 100.0\n if (len(train_accuracyx) > len(val_accuracyx)):\n val_accuracyx.append(valx[-1])\n val_accuracyy.append(cur_accuracy)\n else:\n train_accuracyx.append(trainx[-1])\n train_accuracyy.append(cur_accuracy)\n\n while(len(valx) > 0 and valx[0] < args.start_epoch):\n valx = valx[1:]\n valy = valy[1:]\n\n while(len(trainx) > 0 and trainx[0] < args.start_epoch):\n trainx = trainx[1:]\n trainy = trainy[1:]\n\n\n #window config\n wndVal = min(windowVal, int(0.8 * len(valy)))\n wndTrain = min(windowTrain, int(0.8 * len(trainy)))\n \n print \"Train length: \", len(trainy), \" \\t\\t window: \", wndTrain\n print \"Val length: \", len(valy), \" \\t\\t window: \", wndVal\n \n #movAvg and correcting length\n #valy = movingAverage(valy, wndVal)\n #trainy = movingAverage(trainy, wndTrain)\n #valx = valx[:len(valy)]\n #trainx = trainx[:len(trainy)]\n \n\n #plotting\n greenDiff = 50\n redBlueDiff = 50\n \n if (args.loss):\n plt.plot(trainx, trainy, '#00' + hex(index * greenDiff)[2:] \n + hex(256 - index * redBlueDiff)[2:],\n label=plotLabel + \" train\")\n plt.hold(True)\n\n plt.plot(valx, valy, '#' + hex(256 - index * redBlueDiff)[2:] \n + hex(index * greenDiff)[2:] + '00',\n label=plotLabel + \" validation\")\n plt.hold(True)\n \n if (args.accuracy):\n plt.plot(train_accuracyx, train_accuracyy, '#000000',\n label=plotLabel + \" train_accuracy\")\n plt.hold(True)\n\n plt.plot(val_accuracyx, val_accuracyy, '#00FF00',\n label=plotLabel + \" val_accuracy\")\n plt.hold(True)\n \n print \"plot index =\", index\n for (x, y) in zip(val_accuracyx, val_accuracyy):\n print \"\\tepoch = %.0f, accuracy = %f\" % (x - 1, y)\n print '\\tMax: %f // Epoch: %d' % (max(val_accuracyy), val_accuracyx[val_accuracyy.index(max(val_accuracyy))])\n\n\nplotTrainVal(args.log, 1, args.log)\n\n\nif (args.legend):\n plt.legend(loc='upper right', fontsize='x-small')\nplt.gcf().savefig(plotname)\n\n" }, { "alpha_fraction": 0.6032803654670715, "alphanum_fraction": 0.628395676612854, "avg_line_length": 29.03076934814453, "blob_id": "3541a7bc1c555fe2ef583beb9803bd2c60089d98", "content_id": "2e68f37a8a378d29fed93b68b55752e3342972ce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1951, "license_type": "permissive", "max_line_length": 84, "num_lines": 65, "path": "/code/audioSR/Spoken-language-identification-master/get_score_from_probabilities.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" USAGE: python get_score_from_probabilities.py --prediction= --anwser=\n prediction file may have 
less lines\n\"\"\"\nimport sys\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--prediction', type=str)\nparser.add_argument('--answer', type=str, default='valDataNew.csv')\nargs = parser.parse_args()\nprint args\n\n\n# info about classes\nfile = open('trainingData.csv')\ndata = file.readlines()[1:]\nlangs = set()\nfor line in data:\n filepath, language = line.split(',')\n language = language.strip()\n langs.add(language)\nlangs = sorted(langs)\n\n\nprediction_file = open(args.prediction, 'r')\nprediction_lines = prediction_file.readlines()\nanswer_file = open(args.answer, 'r')\nanswer_lines = answer_file.readlines()\ncnt = len(prediction_lines)\ntop_coder_score = 0.0\ncorrect = 0\n\nwrong_answers = open('wrong_answers.txt', 'w')\n\nfor iter in range(cnt):\n st = answer_lines[iter]\n (name, label) = st.split(',')\n label = int(label)\n\n out = prediction_lines[iter].split(',')\n out = [float(x) for x in out]\n pred = [(x, it) for it, x in enumerate(out)]\n pred = sorted(pred, reverse=True)\n\n if (pred[0][1] == label):\n correct += 1\n top_coder_score = top_coder_score + 1000\n elif (pred[1][1] == label):\n #correct += 1\n top_coder_score = top_coder_score + 400\n elif (pred[2][1] == label): \n #correct += 1\n top_coder_score = top_coder_score + 160\n\n if (pred[0][1] != label):\n print >> wrong_answers, answer_lines[iter] + prediction_lines[iter]\n \n if ((iter + 1) % 100 == 0):\n print >> sys.stderr, \"processed %d / %d images\" % (iter + 1, cnt)\n print >> sys.stderr, \"expected score:\", top_coder_score / (iter + 1) * 35200\n\nprint >> sys.stderr, \"Final score: \", top_coder_score, \" / \", cnt, \"000\"\nprint >> sys.stderr, \"expected score:\", top_coder_score / cnt * 35200\nprint >> sys.stderr, \"Accuracy: \", 100.0 * correct / cnt" }, { "alpha_fraction": 0.6249746084213257, "alphanum_fraction": 0.6470389366149902, "avg_line_length": 33.44055938720703, "blob_id": "86c96724c75e021937d1be266d31067a64cc977a", "content_id": "8efb750cdec92d06cee8d5518e372a66c5442b4c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14775, "license_type": "permissive", "max_line_length": 149, "num_lines": 429, "path": "/code/lipreading/lipreadingTCDTIMIT_binary.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport sys\nprint(sys.path)\n\nimport os\nimport time\n\nimport numpy as np\n\nnp.random.seed(1234) # for reproducibility?\n\nimport lasagne\nimport lasagne.layers\nfrom lasagne.layers import count_params\nfrom lasagne.updates import nesterov_momentum\n\nos.environ[\"THEANO_FLAGS\"] = \"cuda.root=/usr/local/cuda,device=gpu,floatX=float32\"\n# specifying the gpu to use\nimport theano.sandbox.cuda\ntheano.sandbox.cuda.use('gpu1')\nimport theano\nimport theano.tensor as T\nfrom theano import function, config, shared, sandbox\n\n\n\n# from http://blog.christianperone.com/2015/08/convolutional-neural-networks-and-feature-extraction-with-python/\n# import matplotlib\n# import matplotlib.pyplot as plt\n# import matplotlib.cm as cm\nimport cPickle as pickle\nimport gzip\nimport numpy as np\n\n# from nolearn.lasagne import NeuralNet\n# from nolearn.lasagne import visualize\n# from sklearn.metrics import classification_report\n# from sklearn.metrics import confusion_matrix\n\nimport logging\nfrom theano.compat.six.moves import xrange\nfrom pylearn2.datasets import cache, dense_design_matrix\nfrom pylearn2.expr.preprocessing 
import global_contrast_normalize\nfrom pylearn2.utils import contains_nan\nfrom pylearn2.utils import serial\nfrom pylearn2.utils import string_utils\nfrom collections import OrderedDict\n\n\n_logger = logging.getLogger(__name__)\n\n# User - created files\nimport train_lipreadingTCDTIMIT # load training functions\nimport datasetClass # load the binary dataset in proper format\nimport buildNetworks\n# import binary_net\n\ndef main ():\n \n # BN parameters\n batch_size = 50\n print(\"batch_size = \" + str(batch_size))\n # alpha is the exponential moving average factor\n alpha = .1\n print(\"alpha = \" + str(alpha))\n epsilon = 1e-4\n print(\"epsilon = \" + str(epsilon))\n\n # BinaryOut\n activation = binary_tanh_unit\n print(\"activation = binary_tanh_unit\")\n # activation = binary_sigmoid_unit\n # print(\"activation = binary_sigmoid_unit\")\n\n # BinaryConnect\n binary = True\n print(\"binary = \" + str(binary))\n stochastic = False\n print(\"stochastic = \" + str(stochastic))\n # (-H,+H) are the two binary values\n # H = \"Glorot\"\n H = 1.\n print(\"H = \" + str(H))\n # W_LR_scale = 1.\n W_LR_scale = \"Glorot\" # \"Glorot\" means we are using the coefficients from Glorot's paper\n print(\"W_LR_scale = \" + str(W_LR_scale))\n\n\n # Training parameters\n num_epochs = 500\n print(\"num_epochs = \" + str(num_epochs))\n\n # Decaying LR\n LR_start = 0.002\n print(\"LR_start = \" + str(LR_start))\n LR_fin = 0.0000003\n print(\"LR_fin = \" + str(LR_fin))\n LR_decay = (LR_fin / LR_start) ** (1. / num_epochs)\n print(\"LR_decay = \" + str(LR_decay))\n # BTW, LR decay might good for the BN moving average...\n\n shuffle_parts = 1\n print(\"shuffle_parts = \" + str(shuffle_parts))\n\n print('Loading TCDTIMIT dataset...')\n database_binary_location = os.path.join(os.path.expanduser('~/TCDTIMIT/database_binary'))\n train_set, valid_set, test_set = load_dataset(database_binary_location, 0.8,0.1,0.1) #location, %train, %valid, %test\n\n print(\"the number of training examples is: \", len(train_set.X))\n print(\"the number of valid examples is: \", len(valid_set.X))\n print(\"the number of test examples is: \", len(test_set.X))\n\n print('Building the CNN...')\n\n # Prepare Theano variables for inputs and targets\n input = T.tensor4('inputs')\n target = T.matrix('targets')\n LR = T.scalar('LR', dtype=theano.config.floatX)\n\n\n # get the network structure\n #cnn = buildNetworks.build_network_cifar10_binary(activation, alpha, epsilon, input, binary, stochastic, H, W_LR_scale) # 7176231 params\n cnn = buildNetworks.build_network_google_binary(activation, alpha, epsilon, input, binary, stochastic, H, W_LR_scale) # 7176231 params\n\n\n # print het amount of network parameters\n print(\"The number of parameters of this network: \",lasagne.layers.count_params(cnn))\n\n\n # get output layer, for calculating loss etc\n train_output = lasagne.layers.get_output(cnn, deterministic=False)\n\n # squared hinge loss\n loss = T.mean(T.sqr(T.maximum(0., 1. 
- target * train_output)))\n\n\n\n if binary:\n # W updates\n W = lasagne.layers.get_all_params(cnn, binary=True)\n W_grads = compute_grads(loss, cnn)\n updates = lasagne.updates.adam(loss_or_grads=W_grads, params=W, learning_rate=LR)\n updates = clipping_scaling(updates, cnn)\n \n # other parameters updates\n params = lasagne.layers.get_all_params(cnn, trainable=True, binary=False)\n updates = OrderedDict(\n updates.items() + lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR).items())\n\n else:\n params = lasagne.layers.get_all_params(cnn, trainable=True)\n updates = lasagne.updates.adam(loss_or_grads=loss, params=params, learning_rate=LR)\n\n\n test_output = lasagne.layers.get_output(cnn, deterministic=True)\n test_loss = T.mean(T.sqr(T.maximum(0., 1. - target * test_output)))\n test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)), dtype=theano.config.floatX)\n\n # Compile a function performing a training step on a mini-batch (by giving the updates dictionary)\n # and returning the corresponding training loss:\n train_fn = theano.function([input, target, LR], loss, updates=updates)\n\n # Compile a second function computing the validation loss and accuracy:\n val_fn = theano.function([input, target], [test_loss, test_err])\n\n print('Training...')\n\n train_lipreadingTCDTIMIT.train(\n train_fn, val_fn,\n cnn,\n batch_size,\n LR_start, LR_decay,\n num_epochs,\n train_set.X, train_set.y,\n valid_set.X, valid_set.y,\n test_set.X, test_set.y,\n save_path=\"./TCDTIMITBestModel\",\n shuffle_parts=shuffle_parts)\n\n\ndef unpickle(file):\n import cPickle\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\n\ndef load_dataset (datapath = os.path.join(os.path.expanduser('~/TCDTIMIT/database_binary')), trainFraction=0.8, validFraction=0.1, testFraction=0.1):\n # from https://www.cs.toronto.edu/~kriz/cifar.html\n # also see http://stackoverflow.com/questions/35032675/how-to-create-dataset-similar-to-cifar-10\n\n # Lipspeaker 1: 14627 phonemes, 14617 extacted and useable\n # Lipspeaker 2: 28363 - 14627 = 13736 phonemes 13707 extracted\n # Lipspeaker 3: 42535 - 28363 = 14172 phonemes 14153 extracted\n # total Lipspeakers: 14500 + 13000 + 14000 = 42477\n\n dtype = 'uint8'\n ntotal = 50000 # estimate, for initialization. 
takes some safty margin\n img_shape = (1, 120, 120)\n img_size = np.prod(img_shape)\n\n # prepare data to load\n fnamesLipspkrs = ['Lipspkr%i.pkl' % i for i in range(1,4)] # all 3 lipsteakers\n fnamesVolunteers = []#['Volunteer%i.pkl' % i for i in range(1,11)] # 12 first volunteers\n fnames = fnamesLipspkrs + fnamesVolunteers\n datasets = {}\n for name in fnames:\n fname = os.path.join(datapath, name)\n if not os.path.exists(fname):\n raise IOError(fname + \" was not found.\")\n datasets[name] = cache.datasetCache.cache_file(fname)\n\n # load the images\n # first initialize the matrices\n lenx = ntotal\n xtrain = np.zeros((lenx, img_size), dtype=dtype)\n xvalid = np.zeros((lenx, img_size), dtype=dtype)\n xtest = np.zeros((lenx, img_size), dtype=dtype)\n\n ytrain = np.zeros((lenx, 1), dtype=dtype)\n yvalid = np.zeros((lenx, 1), dtype=dtype)\n ytest = np.zeros((lenx, 1), dtype=dtype)\n\n # memory issues: print size\n memTot = xtrain.nbytes + xvalid.nbytes + xtest.nbytes + ytrain.nbytes + yvalid.nbytes + ytest.nbytes\n # print(\"Empty matrices, memory required: \", memTot / 1000000, \" MB\")\n\n # now load train data\n trainLoaded = 0\n validLoaded = 0\n testLoaded = 0\n\n for i, fname in enumerate(fnames):\n print(\"Total loaded till now: \", trainLoaded + validLoaded + testLoaded, \" out of \", ntotal)\n print(\"nbTrainLoaded: \", trainLoaded)\n print(\"nbValidLoaded: \", validLoaded)\n print(\"nbTestLoaded: \", testLoaded)\n\n print('loading file %s' % datasets[fname])\n data = unpickle(datasets[fname])\n\n thisN = data['data'].shape[0]\n print(\"This dataset contains \", thisN, \" images\")\n\n thisTrain = int(trainFraction * thisN)\n thisValid = int(validFraction * thisN)\n thisTest = thisN - thisTrain - thisValid # compensates for rounding\n print(\"now loading : nbTrain, nbValid, nbTest\")\n print(\" \", thisTrain, thisValid, thisTest)\n\n xtrain[trainLoaded:trainLoaded + thisTrain, :] = data['data'][0:thisTrain]\n xvalid[validLoaded:validLoaded + thisValid, :] = data['data'][thisTrain:thisTrain + thisValid]\n xtest[testLoaded:testLoaded + thisTest, :] = data['data'][thisTrain + thisValid:thisN]\n\n ytrain[trainLoaded:trainLoaded + thisTrain, 0] = data['labels'][0:thisTrain]\n yvalid[validLoaded:validLoaded + thisValid, 0] = data['labels'][thisTrain:thisTrain + thisValid]\n ytest[testLoaded:testLoaded + thisTest, 0] = data['labels'][thisTrain + thisValid:thisN]\n\n trainLoaded += thisTrain\n validLoaded += thisValid\n testLoaded += thisTest\n\n\n if (trainLoaded + validLoaded + testLoaded) >= ntotal:\n print(\"loaded too many?\")\n break\n\n ntest = testLoaded\n nvalid = validLoaded\n ntrain = trainLoaded\n print(\"Total loaded till now: \", trainLoaded + validLoaded + testLoaded, \" out of \", ntotal)\n print(\"nbTrainLoaded: \", trainLoaded)\n print(\"nbValidLoaded: \", validLoaded)\n print(\"nbTestLoaded: \", testLoaded)\n\n # remove unneeded rows\n xtrain = xtrain[0:trainLoaded]\n xvalid = xvalid[0:validLoaded]\n xtest = xtest[0:testLoaded]\n ytrain = ytrain[0:trainLoaded]\n yvalid = yvalid[0:validLoaded]\n ytest = ytest[0:testLoaded]\n\n memTot = xtrain.nbytes + xvalid.nbytes + xtest.nbytes + ytrain.nbytes + yvalid.nbytes + ytest.nbytes\n # print(\"Total memory size required: \", memTot / 1000000, \" MB\")\n\n # process this data, remove all zero rows (http://stackoverflow.com/questions/18397805/how-do-i-delete-a-row-in-a-np-array-which-contains-a-zero)\n # cast to numpy array\n if isinstance(ytrain, list):\n ytrain = np.asarray(ytrain).astype(dtype)\n if isinstance(yvalid, 
list):\n yvalid = np.asarray(yvalid).astype(dtype)\n if isinstance(ytest, list):\n ytest = np.asarray(ytest).astype(dtype)\n\n # fix labels (labels start at 1, but the library expects them to start at 0)\n ytrain = ytrain - 1\n yvalid = yvalid - 1\n ytest = ytest - 1\n\n # now, make objects with these matrices\n train_set = datasetClass.CIFAR10(xtrain, ytrain, img_shape)\n valid_set = datasetClass.CIFAR10(xvalid, yvalid, img_shape)\n test_set = datasetClass.CIFAR10(xtest, ytest, img_shape)\n\n # Inputs in the range [-1,+1]\n # def f1 (x):\n # f = function([], sandbox.cuda.basic_ops.gpu_from_host(x * 2.0 / 255 - 1))\n # return f()\n #\n # def scaleOnGpu (matrix):\n # nbRows = matrix.shape[0]\n # done = 0\n # batchLength = 100\n # thisBatchLength = batchLength\n # i = 0\n # while done != 1:\n # if i + thisBatchLength > nbRows:\n # done = 1\n # thisBatchLength = nbRows - i\n # # do the scaling on GPU\n # matrix[i:(i + thisBatchLength), :] = f1(\n # shared(matrix[i:(i + thisBatchLength), :]))\n # i += batchLength\n # return matrix\n #\n # train_set.X = scaleOnGpu(train_set.X )\n # valid_set.X = scaleOnGpu(valid_set.X )\n # test_set.X = scaleOnGpu(test_set.X)\n\n train_set.X = np.subtract(np.multiply(2. / 255., train_set.X), 1.)\n valid_set.X = np.subtract(np.multiply(2. / 255., valid_set.X), 1.)\n test_set.X = np.subtract(np.multiply(2. / 255., test_set.X), 1.)\n\n train_set.X = np.reshape(train_set.X, (-1, 1, 120, 120))\n valid_set.X = np.reshape(valid_set.X, (-1, 1, 120, 120))\n test_set.X = np.reshape(test_set.X, (-1, 1, 120, 120))\n\n # flatten targets\n train_set.y = np.hstack(train_set.y)\n valid_set.y = np.hstack(valid_set.y)\n test_set.y = np.hstack(test_set.y)\n # Onehot the targets\n train_set.y = np.float32(np.eye(39)[train_set.y])\n valid_set.y = np.float32(np.eye(39)[valid_set.y])\n test_set.y = np.float32(np.eye(39)[test_set.y])\n # for hinge loss\n train_set.y = 2 * train_set.y - 1.\n valid_set.y = 2 * valid_set.y - 1.\n test_set.y = 2 * test_set.y - 1.\n\n return train_set, valid_set, test_set\n\n\n############# BINARY NET #######################3\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom theano.scalar.basic import UnaryScalarOp, same_out_nocomplex\nfrom theano.tensor.elemwise import Elemwise\n\n\n# Our own rounding function, that does not set the gradient to 0 like Theano's\nclass Round3(UnaryScalarOp):\n def c_code (self, node, name, (x, ), (z, ), sub):\n return \"%(z)s = round(%(x)s);\" % locals()\n \n def grad (self, inputs, gout):\n (gz,) = gout\n return gz,\n\n\nround3_scalar = Round3(same_out_nocomplex, name='round3')\nround3 = Elemwise(round3_scalar)\n\n\ndef hard_sigmoid (x):\n return T.clip((x + 1.) / 2., 0, 1)\n\n\n# The neurons' activations binarization function\n# It behaves like the sign function during forward propagation\n# And like:\n# hard_tanh(x) = 2*hard_sigmoid(x)-1\n# during back propagation\ndef binary_tanh_unit (x):\n return 2. 
* round3(hard_sigmoid(x)) - 1.\n\n\ndef binary_sigmoid_unit (x):\n return round3(hard_sigmoid(x))\n\n\n\n# This function computes the gradient of the binary weights\ndef compute_grads (loss, network):\n layers = lasagne.layers.get_all_layers(network)\n grads = []\n \n for layer in layers:\n \n params = layer.get_params(binary=True)\n if params:\n # print(params[0].name)\n grads.append(theano.grad(loss, wrt=layer.Wb))\n \n return grads\n\n\n# This functions clips the weights after the parameter update\ndef clipping_scaling (updates, network):\n layers = lasagne.layers.get_all_layers(network)\n updates = OrderedDict(updates)\n \n for layer in layers:\n \n params = layer.get_params(binary=True)\n for param in params:\n print(\"W_LR_scale = \" + str(layer.W_LR_scale))\n print(\"H = \" + str(layer.H))\n updates[param] = param + layer.W_LR_scale * (updates[param] - param)\n updates[param] = T.clip(updates[param], -layer.H, layer.H)\n \n return updates\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.586670458316803, "alphanum_fraction": 0.5969679355621338, "avg_line_length": 28.61403465270996, "blob_id": "b2116e829ef03de13f63e2ae63d1d3a6650ea801", "content_id": "2b1b18783cfbd905180ae7d1110dd826a1453339", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3496, "license_type": "permissive", "max_line_length": 94, "num_lines": 114, "path": "/code/Experiments/BinaryNet-master/Run-time/mnist.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\r\nimport sys\r\nimport os\r\nimport time\r\n\r\nimport numpy as np\r\nnp.random.seed(1234) # for reproducibility\r\n\r\n# specifying the gpu to use\r\n# import theano.sandbox.cuda\r\n# theano.sandbox.cuda.use('gpu1') \r\nimport theano\r\nimport theano.tensor as T\r\n\r\nimport lasagne\r\n\r\nimport cPickle as pickle\r\nimport gzip\r\n\r\nimport binary_ops\r\n\r\nfrom pylearn2.datasets.mnist import MNIST\r\nfrom pylearn2.utils import serial\r\n\r\nfrom collections import OrderedDict\r\n\r\nif __name__ == \"__main__\":\r\n \r\n batch_size = 10000\r\n print(\"batch_size = \"+str(batch_size))\r\n \r\n # MLP parameters\r\n num_units = 4096\r\n print(\"num_units = \"+str(num_units))\r\n n_hidden_layers = 3\r\n print(\"n_hidden_layers = \"+str(n_hidden_layers))\r\n \r\n # kernel = \"baseline\"\r\n kernel = \"xnor\"\r\n # kernel = \"theano\"\r\n print(\"kernel = \"+ kernel)\r\n \r\n print('Loading MNIST dataset...')\r\n \r\n test_set = MNIST(which_set= 'test', center = False)\r\n # Inputs in the range [-1,+1]\r\n test_set.X = 2* test_set.X.reshape(-1, 784) - 1.\r\n # flatten targets\r\n test_set.y = test_set.y.reshape(-1)\r\n\r\n print('Building the MLP...') \r\n \r\n # Prepare Theano variables for inputs and targets\r\n input = T.matrix('inputs')\r\n target = T.vector('targets')\r\n\r\n mlp = lasagne.layers.InputLayer(shape=(None, 784),input_var=input)\r\n \r\n # Input layer is not binary -> use baseline kernel in first hidden layer\r\n mlp = binary_ops.DenseLayer(\r\n mlp,\r\n nonlinearity=lasagne.nonlinearities.identity,\r\n num_units=num_units,\r\n kernel = \"baseline\") \r\n \r\n mlp = lasagne.layers.BatchNormLayer(mlp)\r\n mlp = lasagne.layers.NonlinearityLayer(mlp,nonlinearity=binary_ops.SignTheano)\r\n \r\n for k in range(1,n_hidden_layers):\r\n \r\n mlp = binary_ops.DenseLayer(\r\n mlp,\r\n nonlinearity=lasagne.nonlinearities.identity,\r\n num_units=num_units,\r\n kernel = kernel) \r\n \r\n mlp = 
lasagne.layers.BatchNormLayer(mlp)\r\n mlp = lasagne.layers.NonlinearityLayer(mlp,nonlinearity=binary_ops.SignTheano)\r\n \r\n mlp = binary_ops.DenseLayer(\r\n mlp, \r\n nonlinearity=lasagne.nonlinearities.identity,\r\n num_units=10,\r\n kernel = kernel)\r\n \r\n mlp = lasagne.layers.BatchNormLayer(mlp)\r\n test_output = lasagne.layers.get_output(mlp, deterministic=True)\r\n test_err = T.mean(T.neq(T.argmax(test_output, axis=1), target),dtype=theano.config.floatX)\r\n\r\n # Compile a second function computing the validation loss and accuracy:\r\n val_fn = theano.function([input, target], test_err)\r\n \r\n print(\"Loading the trained parameters and binarizing the weights...\")\r\n\r\n # Load parameters\r\n with np.load('mnist_parameters.npz') as f:\r\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\r\n lasagne.layers.set_all_param_values(mlp, param_values)\r\n\r\n # Binarize the weights\r\n params = lasagne.layers.get_all_params(mlp)\r\n for param in params:\r\n # print param.name\r\n if param.name == \"W\":\r\n param.set_value(binary_ops.SignNumpy(param.get_value()))\r\n \r\n print('Running...')\r\n \r\n start_time = time.time()\r\n \r\n test_error = val_fn(test_set.X,test_set.y)*100.\r\n print \"test_error = \" + str(test_error) + \"%\"\r\n \r\n run_time = time.time() - start_time\r\n print(\"run_time = \"+str(run_time)+\"s\")\r\n " }, { "alpha_fraction": 0.7448979616165161, "alphanum_fraction": 0.7938775420188904, "avg_line_length": 60.25, "blob_id": "81006156fde9d11b1741c7edeaf8609fc8aee44c", "content_id": "a3e33bb6e6c21c7278836a48823e100e4327faf9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 490, "license_type": "permissive", "max_line_length": 192, "num_lines": 8, "path": "/code/audioSR/Spoken-language-identification-master/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# Spoken language identification with deep learning\n\nRead more in the following blog posts:\n\n* [About TopCoder contest and our CNN-based solution implemented in Caffe](http://yerevann.github.io/2015/10/11/spoken-language-identification-with-deep-convolutional-networks/) (October 2015)\n* [About combining CNN and RNN using Theano/Lasagne](http://yerevann.github.io/2016/06/26/combining-cnn-and-rnn-for-spoken-language-identification/) (June 2016)\n\nTheano/Lasagne models are [here](/theano)\n" }, { "alpha_fraction": 0.6301644444465637, "alphanum_fraction": 0.634175717830658, "avg_line_length": 31.363636016845703, "blob_id": "98b39d37177925c4d8ca265da6885c3786d4deda", "content_id": "f5d8b710277da80bdb32b5ec039bc533ba9c1553", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2493, "license_type": "permissive", "max_line_length": 139, "num_lines": 77, "path": "/code/audioSR/Preprocessing/fixWavs.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import os, errno\nimport subprocess\nimport sys\n\n# We need to execute this command for every wav file we find:\n# mplayer \\\n# -quiet \\\n# -vo null \\\n# -vc dummy \\\n# -ao pcm:waveheader:file=\"audio_FIXED.wav\" audio_BROKEN.wav\n# (see http://en.linuxreviews.org/HOWTO_Convert_audio_files)\n\n# Read a file that contains the paths of all our .wav files\n# Then, for each wav file, get the path where the fixed version should be stored\n# Then generate the fixed files.\nfrom prepareWAV_HTK import *\n\n\ndef readFile(filename):\n with open(filename, \"r\") as 
ins:\n array = []\n for line in ins:\n line = line.strip('\\n') # strip newlines\n if len(line) > 1: # don't save the dots lines\n array.append(line)\n\n return array\n\ndef fixWav(wavPath, fixedWavPath):\n\n name = os.path.basename(wavPath)\n if not os.path.exists(os.path.dirname(fixedWavPath)): # skip already existing videos (it is assumed the exist if the directory exists)\n os.makedirs(os.path.dirname(fixedWavPath))\n\n if not os.path.exists(fixedWavPath):\n command = ['mplayer',\n '-quiet',\n '-vo', 'null',\n '-vc', 'dummy',\n '-ao', 'pcm:waveheader:file='+fixedWavPath,\n wavPath]\n\n # actually run the command, only show stderror on terminal, close the processes (don't wait for user input)\n FNULL = open(os.devnull, 'w')\n p = subprocess.Popen(command, stdout=FNULL, stderr=subprocess.STDOUT, close_fds=True) # stdout=subprocess.PIPE\n return 1\n else:\n return 0\n\ndef getFixedWavPath(path, baseDir, fixedDir):\n thisDir = os.path.dirname(path)\n relPath = os.path.relpath(thisDir, baseDir)\n newPath = ''.join([fixedDir,os.sep,relPath,os.sep,os.path.basename(path)])\n return newPath\n\n\ndef fixWavs(baseDir, fixedDir):\n print \"Fixing WAV files in \", baseDir, \" and storing to: \", fixedDir\n\n # generate file that contains paths to wav files\n print \"Searching for WAVs in: \", baseDir\n prepareWAV_HTK(baseDir, baseDir)\n\n pathsFile = baseDir + os.sep + 'wavPaths.txt'\n wavPaths = readFile(pathsFile)\n\n # fix files, store them under fixedDir\n nbFixed=0\n for wavPath in wavPaths:\n fixedWavPath = getFixedWavPath(wavPath, baseDir, fixedDir)\n fixWav(wavPath, fixedWavPath)\n\n nbFixed+=1\n if (nbFixed % 100 == 0):\n print \"Fixed \", nbFixed, \"out of\",len(wavPaths)\n\n return 0\n\n" }, { "alpha_fraction": 0.7246376872062683, "alphanum_fraction": 0.7555135488510132, "avg_line_length": 40.76315689086914, "blob_id": "38ffc73fdb3c50e3557cbf61db040c91cc7e40d1", "content_id": "36c2cf5ac31f8258c3af8c57b823fb1738fc3488", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1587, "license_type": "permissive", "max_line_length": 129, "num_lines": 38, "path": "/code/Experiments/BinaryNet-master/Run-time/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# Run-time\n\n## Motivations\n\nThis subrepository demonstrates the XNOR and baseline GPU kernels described in the article: \n[BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1.](http://arxiv.org/abs/1602.02830)\n\n## Requirements\n\n* Python 2.7, Numpy, Scipy\n* [Theano](http://deeplearning.net/software/theano/install.html)\n* Nvidia GPU (not optional)\n* Setting your [Theano flags](http://deeplearning.net/software/theano/library/config.html) to use the GPU\n* [Pylearn2](http://deeplearning.net/software/pylearn2/)\n* [Downloading MNIST](https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/scripts/datasets/download_mnist.py)\n* [Lasagne](http://lasagne.readthedocs.org/en/latest/user/installation.html)\n\n## Matrix multiplication\n\n nvcc benchmark-cublas.cu -std=c++11 -lcublas && ./a.out\n \nThis benchmark performs 8192x8192x8192 matrix multiplications with our two kernels and cuBLAS.\nThe three kernels return exactly the same output when their inputs are constrained to -1 or +1 (but not otherwise).\n**The XNOR kernel is about 23x faster than the baseline kernel and 3.4x faster than cuBLAS** on a GTX750 Nvidia GPU.\n\n## MNIST MLP\n\nFirst, you need to get a trained MNIST MLP:\n\n 
python ../Train-time/mnist.py \n \nThen, you can run the trained MNIST MLP using our XNOR GPU kernel:\n\n python mnist.py\n \nThe execution time largely depends on your GPU (between 0.4s and 1.5s).\nThe test error rate should be around 0.96%.\nYou can compare these results with the baseline kernel or Theano's by modifying the line ~60 in the script.\n" }, { "alpha_fraction": 0.5757864713668823, "alphanum_fraction": 0.6005719900131226, "avg_line_length": 25.897436141967773, "blob_id": "6f18836cf995bd4679198647b5315557c7aa1d49", "content_id": "80b3af2949e69196265ecda0334f11f94e30f163", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1049, "license_type": "permissive", "max_line_length": 73, "num_lines": 39, "path": "/code/audioSR/Spoken-language-identification-master/make_submission.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" Usage: python make_submission.py csvpath model_name\ncsv - must contain 12320 rows, 176 coloumns: the predictions for test set\n\"\"\"\n\nimport sys\nimport numpy as np\n\n# info about classes\nfile = open('trainingData.csv')\ndata = file.readlines()[1:]\nlangs = set()\nfor line in data:\n filepath, language = line.split(',')\n language = language.strip()\n langs.add(language)\nlangs = sorted(langs)\n\npath = sys.argv[1]\nname = sys.argv[2]\nread_file = open(path, 'r')\nf = open('testingData.csv')\ncnt = 12320\nprint_file = open('predictions/test_' + name + '.csv', 'w')\n\nfor iter in range(cnt):\n st = f.readline()\n name = st.strip()[:-4]\n \n out = read_file.readline().split(',')\n out = [float(x) for x in out]\n pred = sorted([(x, it) for it, x in enumerate(out)], reverse=True)\n\n for i in range(3):\n lang_id = pred[i][1]\n lang = langs[lang_id]\n print_file.write(name + '.mp3,' + lang + ',' + str(i + 1) + '\\n')\n\n if (iter % 100 == 0):\n print >> sys.stderr, \"processed %d / %d images\" % (iter + 1, cnt)\n" }, { "alpha_fraction": 0.5364120602607727, "alphanum_fraction": 0.5719360709190369, "avg_line_length": 23.521739959716797, "blob_id": "74d2370e44016495a3b316120faf91fcb186c24d", "content_id": "d435978354dbe4bf5e313649d9de286b0fe00bf6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 563, "license_type": "permissive", "max_line_length": 54, "num_lines": 23, "path": "/code/audioSR/Spoken-language-identification-master/get_sum_of_csvs.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" Usage: python get_sum_csvs.py csv1path csv2path ..\n\"\"\"\nimport sys\nimport numpy as np\n\nn_csv = len(sys.argv) - 1\ncnt = 12320\n\ncsv = []\nfor index in range(1, len(sys.argv)):\n csv.append(open(sys.argv[index], 'r'))\n \noutfile = open('summed.csv', 'w')\n\nfor iter in range(12320):\n out = np.zeros((176,), dtype=np.float32)\n for index in range(n_csv):\n cur_out = csv[index].readline().split(',')\n cur_out = [float(x) for x in cur_out]\n out += cur_out\n \n out = [(\"%.6f\" % x) for x in out]\n outfile.write(','.join(out) + '\\n')" }, { "alpha_fraction": 0.5993408560752869, "alphanum_fraction": 0.624764621257782, "avg_line_length": 30.25, "blob_id": "32d2c7d04d31f1348bd0e8927f3e31128bd239c1", "content_id": "baba7dd1653cf1576c77caaf187a9747dc64c7e7", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2124, "license_type": "permissive", "max_line_length": 116, "num_lines": 68, "path": 
"/code/audioSR/Spoken-language-identification-master/get_score_from_top3_prediction.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" USAGE: python get_score_fromcsv.py --prediction= --anwser=\n \n Prediction file may have less lines\n \n Each line of prediction file must contain at least 3 integers: labels of top3\n predictions, then it may have some additional information\n\"\"\"\nimport sys\nimport numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--prediction', type=str)\nparser.add_argument('--answer', type=str, default='valDataNew.csv')\nargs = parser.parse_args()\nprint args\n\n\n# info about classes\nfile = open('trainingData.csv')\ndata = file.readlines()[1:]\nlangs = set()\nfor line in data:\n filepath, language = line.split(',')\n language = language.strip()\n langs.add(language)\nlangs = sorted(langs)\n\n\nprediction_file = open(args.prediction, 'r')\nprediction_lines = prediction_file.readlines()\nanswer_file = open(args.answer, 'r')\nanswer_lines = answer_file.readlines()\ncnt = len(prediction_lines)\ntop_coder_score = 0.0\ncorrect = 0\n\nwrong_answers = open('wrong_answers.txt', 'w')\n\nfor iter in range(cnt):\n st = answer_lines[iter]\n (name, label) = st.split(',')\n label = int(label)\n\n pred = prediction_lines[iter].split(',')\n pred = [int(x) for x in pred]\n\n if (pred[0] == label):\n correct += 1\n top_coder_score = top_coder_score + 1000\n elif (pred[1] == label):\n #correct += 1\n top_coder_score = top_coder_score + 400\n elif (pred[2] == label):\n #correct += 1\n top_coder_score = top_coder_score + 160\n\n if (pred[0] != label):\n print >> wrong_answers, (answer_lines[iter] + str(pred[3 + pred[0]]) + ',' + str(pred[3 + pred[1]]) + ',' + \n str(pred[3 + pred[2]]) + ', votes for correct answer: ' + str(pred[3 + label])) \n\n if ((iter + 1) % 100 == 0):\n print >> sys.stderr, \"processed %d / %d images\" % (iter + 1, cnt)\n print >> sys.stderr, \"expected score:\", top_coder_score / (iter + 1) * 35200\n\nprint >> sys.stderr, \"Final score: \", top_coder_score, \" / \", cnt, \"000\"\nprint >> sys.stderr, \"expected score:\", top_coder_score / cnt * 35200\nprint >> sys.stderr, \"Accuracy: \", 100.0 * correct / cnt" }, { "alpha_fraction": 0.5551331043243408, "alphanum_fraction": 0.5912547707557678, "avg_line_length": 31.875, "blob_id": "c5c8d273c7132521bacaa9d9e8504800a8c07775", "content_id": "0f829158d8a8222ebc1f0d07208b5307adffced8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 526, "license_type": "permissive", "max_line_length": 103, "num_lines": 16, "path": "/code/audioSR/Preprocessing/progress_bar.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import sys\n\n\n# def update_progress(amtDone):\n# sys.stdout.write(\"\\rProgress: [{0:50s}] {1:.1f}%\".format('#' * int(amtDone * 50), amtDone * 100))\n# sys.stdout.flush()\n\ndef show_progress(frac_done, bar_length=20):\n # for i in range(end_val):\n hashes = '#' * int(round(frac_done * bar_length))\n spaces = ' ' * (bar_length - len(hashes))\n sys.stdout.write(\"\\rProgress: [{0}] {1}% \".format(hashes + spaces, int(round(frac_done * 100))))\n sys.stdout.flush()\n\nif __name__ == '__main__':\n show_progress(0.8)\n" }, { "alpha_fraction": 0.6419640779495239, "alphanum_fraction": 0.657202422618866, "avg_line_length": 38.57732009887695, "blob_id": "8dd86c06fedee534211aeeea65c71c7fae4e403f", "content_id": "029a6baa822093449745bbed447f37802c3e2d47", 
"detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7678, "license_type": "permissive", "max_line_length": 151, "num_lines": 194, "path": "/code/lipreading/evaluateImage.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport urllib\nimport io\nimport skimage.transform\nimport matplotlib\n\nmatplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\n\nplt.rcParams['figure.figsize'] = 8, 6\nimport argparse\nimport time\nimport pickle\nimport numpy as np\n\nimport lasagne\nimport theano\nimport os\nimport numpy as np\nfrom PIL import Image\n\nimport buildNetworks\n\nnbClassesPhonemes = 39\nnbClassesVisemes = 12\n\n\nparser = argparse.ArgumentParser(description=\"Getting top results for this image...\")\nadd_arg = parser.add_argument\nadd_arg(\"-i\", \"--input-image\", help=\"Input image to be evaluated\")\nadd_arg(\"-n\", \"--network-type\", help=\"Type of network to be used\", default=1)\nadd_arg(\"-p\", \"--phoneme-trained\", help=\"Network outputting phonemes (1) or visemes (0)\", default=0)\n#add_arg(\"-m\", \"--model-file\", help=\"Model pickle file that contains trained network parameters\")\nargs = parser.parse_args()\n\n# this imported file contains build_model(), which constructs the network structure that you van fill using the pkl file\n# to generate the pkl file, you need to run the main function in resnet50CaffeToLasagne_ImageNet,\n# which populates the network from caffe, gets the classes and the mean image, and stores those in a pkl file\nfrom lipreadingTCDTIMIT import *\n\n\n# build the model structure, fill in the stored parameters from a trained network with this structure\n# networkType: 1 = CIFAR10, 2 = GoogleNet, 3 = ResNet50\n# phonemeViseme: 1 = phoneme-trained, 0 = viseme-trained (meaning outputs are visemes)\ndef load_model (phonemeViseme, networkType):\n # network parameters\n alpha = .1\n print(\"alpha = \" + str(alpha))\n epsilon = 1e-4\n print(\"epsilon = \" + str(epsilon))\n\n # activation\n activation = T.nnet.relu\n print(\"activation = T.nnet.relu\")\n inputs = T.tensor4('inputs')\n targets = T.matrix('targets')\n\n if phonemeViseme ==1: #use phoneme-trained network\n if networkType == 1: # CIFAR10\n cnn = buildNetworks.build_network_cifar10(activation, alpha, epsilon, inputs, nbClassesPhonemes) # nbClassesPhonemes = 39 (global variable)\n with np.load('./results/Phoneme_trained/CIFAR10/allLipspeakers/allLipspeakers.npz') as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(cnn, param_values)\n\n elif networkType == 2: #GoogleNet\n cnn = buildNetworks.build_network_google(activation, alpha, epsilon, inputs, nbClassesPhonemes)\n with np.load('./results/Phoneme_trained/GoogleNet/allLipspeakers/allLipspeakers.npz') as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(cnn, param_values)\n\n elif networkType == 3: #ResNet50\n cnn = buildNetworks.build_network_resnet50(inputs, nbClassesPhonemes)\n with np.load('./results/Phoneme_trained/ResNet50/allLipspeakers/allLipspeakers.npz') as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(cnn['prob'], param_values)\n else:\n print('ERROR: given network type unknown.')\n\n else: #use viseme-trained network\n cnn = buildNetworks.build_network_google(activation, alpha, epsilon, inputs, nbClassesVisemes) # nbClassesVisemes = 13 
(global variable)\n with np.load('./results/Viseme_trained/GoogleNet/allLipspeakers.npz') as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(cnn, param_values)\n\n return cnn\n\n# scale to [0-2], then substract 1 to center around 0 (so now all values are in [-1,1] area)\n# then reshape to make the image fit the network input size\ndef prep_image (fname):\n im = np.array(Image.open(fname), dtype=np.uint8).flatten()\n im = np.subtract(np.multiply(2. / 255., im), 1.)\n im = np.reshape(im, (-1, 1, 120, 120))\n\n return im.astype('float32')\n\n# functions that evaluate the network\n# networkType: 1 = CIFAR10, 2 = GoogleNet, 3 = ResNet50\ndef get_net_fun (phonemeViseme, networkType, numberShown=5):\n print(\"Loading model...\")\n net = load_model(phonemeViseme, networkType)\n\n inputs = T.tensor4('inputs')\n target = T.tensor4('targets')\n k = 5 #get top-5 accuracy\n\n print(\"Compiling Theano evaluation functions...\")\n if (networkType == 3): #ResNets needs a different way of evaluating\n prediction = lasagne.layers.get_output(net['prob'], deterministic=True)\n get_class_prob = theano.function([net['input'].input_var], prediction)\n\n else:\n prediction = lasagne.layers.get_output(net, deterministic=True)\n get_class_prob = theano.function([inputs, target], prediction)\n\n # top 1 accuracy\n print(\"Printing prediction...\")\n print(prediction)\n print(\"Calulating accuracy...\")\n accuracy = T.mean(T.eq(T.argmax(prediction, axis=1), target), dtype=theano.config.floatX)\n # Top k accuracy\n accuracy_k = T.mean(T.any(T.eq(T.argsort(prediction, axis=1)[:, -k:], target.dimshuffle(0, 'x')), axis=1),\n dtype=theano.config.floatX)\n print(\"Compilation done.\")\n\n def print_top5 (im_path):\n print(\"Preprocessing image...\")\n im = prep_image(im_path)\n print(\"Image preprocessed.\")\n\n print(\"Evaluating image...\")\n prob = get_class_prob(im)[0]\n print(prob)\n phonemeNumberMap = getPhonemeNumberMap()\n pred = []\n\n if (numberShown > len(prob) or numberShown < 1): #sanity check\n numberShown = len(prob)\n\n for i in range(0,numberShown): #print network output probabilities\n p = prob[i]\n prob_phoneme = phonemeNumberMap[str(i+1)]\n pred.append([prob_phoneme, p])\n pred = sorted(pred, key=lambda t: t[1], reverse=True)\n for p in pred:\n print(p)\n\n print(\"All done.\")\n\n return get_class_prob, print_top5, accuracy, accuracy_k\n\ndef getPhonemeNumberMap (phonemeMap=\"./phonemeLabelConversion.txt\"):\n phonemeNumberMap = {}\n with open(phonemeMap) as inf:\n for line in inf:\n parts = line.split() # split line into parts\n if len(parts) > 1: # if at least 2 parts/columns\n phonemeNumberMap[str(parts[0])] = parts[1] # part0= frame, part1 = phoneme\n phonemeNumberMap[str(parts[1])] = parts[0]\n return phonemeNumberMap\n\n# Lets take five images and compare prediction of Lasagne with Caffe\ndef test_lasagne_ImageNet (classes, image_urls, mean_values, net):\n im = prep_image(url, mean_values)\n prob = np.array(lasagne.layers.get_output(net['prob'], im, deterministic=True).eval())[0]\n\n print('LProbabilities: ')\n print(prob)\n\n res = sorted(prob_phoneme, key=lambda t: t, reverse=True)[:]\n for p in res:\n print(' ', p)\n\n plt.figure()\n plt.imshow(rawim.astype('uint8'))\n plt.axis('off')\n plt.show()\n\n print('\\n\\n')\n\nif __name__ == \"__main__\":\n print(\"Compiling functions...\")\n get_prob, print_top5, accuracy, accuracy_k = get_net_fun(1, 3, 10) # argument = phonemeViseme, networkType, npz model, numberResultsShown\n print(\"the 
network had \", accuracy, \" top 1 accuracy\")\n print(\"the network had \", accuracy_k, \" top 5 accuracy\")\n\n t0 = time.clock()\n print_top5(args.input_image)\n t1 = time.clock()\n print(\"Total time taken {:.4f}\".format(t1 - t0))\n\n# Usage example\n#python preprocessImage.py -i testImages/w.jpg\n#python evaluateImage.py -i testImages/w_mouth_gray_resized.jpg -m results/ResNet50/allLipspeakers/allLipspeakers.npz\n" }, { "alpha_fraction": 0.5466307401657104, "alphanum_fraction": 0.5676549673080444, "avg_line_length": 29.42622947692871, "blob_id": "2cf014e0abad4b02f052e40323583cf6a3164d07", "content_id": "ad0c093e5f5895c7f5355ae85a02ab6751701067", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "permissive", "max_line_length": 82, "num_lines": 61, "path": "/code/Experiments/Tutorials/nn-from-scratch/simple_classification.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "__author__ = 'm.bashari'\nimport numpy as np\nfrom sklearn import datasets, linear_model\nimport matplotlib.pyplot as plt\n\n\ndef generate_data():\n # np.random.seed(0)\n # X, y = datasets.make_moons(200, noise=0.20)\n N = 100 # number of points per class\n D = 2 # dimensionality\n K = 3 # number of classes\n X = np.zeros((N * K, D)) # data matrix (each row = single example)\n y = np.zeros(N * K, dtype='uint8') # class labels\n for j in xrange(K):\n ix = range(N * j, N * (j + 1))\n r = np.linspace(0.0, 1, N) # radius\n t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2 # theta\n X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]\n y[ix] = j\n return X, y\n\n\ndef visualize(X, y, clf):\n # plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)\n # plt.show()\n plot_decision_boundary(lambda x: clf.predict(x), X, y)\n plt.title(\"Logistic Regression\")\n\n\ndef plot_decision_boundary(pred_func, X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()\n\n\ndef classify(X, y):\n clf = linear_model.LogisticRegressionCV()\n clf.fit(X, y)\n return clf\n\n\ndef main():\n X, y = generate_data()\n # visualize(X, y)\n clf = classify(X, y)\n visualize(X, y, clf)\n\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.7727272510528564, "alphanum_fraction": 0.8030303120613098, "avg_line_length": 51.79999923706055, "blob_id": "74781c9cc424e400325b5f6781579232392b2818", "content_id": "07df1885bdaa956e9c69c3c28a34b6e8a142f012", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 264, "license_type": "permissive", "max_line_length": 139, "num_lines": 5, "path": "/code/audioSR/Spoken-language-identification-master/theano/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# Spoken language identification\n\n`networks` folder contains multiple CNN and/or RNN models implemented in Theano/Lasagne.\n\nRead more in the corresponding [blog 
post](http://yerevann.github.io/2016/06/26/combining-cnn-and-rnn-for-spoken-language-identification/).\n" }, { "alpha_fraction": 0.6209539771080017, "alphanum_fraction": 0.6340715289115906, "avg_line_length": 39.77083206176758, "blob_id": "d2a9b2361d904a90b421c4bdb1275b81b1798e66", "content_id": "b58ae87b280b777df6ef21d9c0611177cd4f46a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5870, "license_type": "permissive", "max_line_length": 144, "num_lines": 144, "path": "/code/audioSR/Spoken-language-identification-master/theano/main.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import sys\nimport numpy as np\nimport sklearn.metrics as metrics\nimport argparse\nimport time\nimport json\nimport importlib\n\nprint \"==> parsing input arguments\"\nparser = argparse.ArgumentParser()\n\n# TODO: add argument to choose training set\nparser.add_argument('--network', type=str, default=\"network_batch\", help='embeding size (50, 100, 200, 300 only)')\nparser.add_argument('--epochs', type=int, default=500, help='number of epochs to train')\nparser.add_argument('--load_state', type=str, default=\"\", help='state file path')\nparser.add_argument('--mode', type=str, default=\"train\", help='mode: train/test/test_on_train')\nparser.add_argument('--batch_size', type=int, default=32, help='no commment')\nparser.add_argument('--l2', type=float, default=0, help='L2 regularization')\nparser.add_argument('--log_every', type=int, default=100, help='print information every x iteration')\nparser.add_argument('--save_every', type=int, default=50000, help='save state every x iteration')\nparser.add_argument('--prefix', type=str, default=\"\", help='optional prefix of network name')\nparser.add_argument('--dropout', type=float, default=0.0, help='dropout rate (between 0 and 1)')\nparser.add_argument('--no-batch_norm', dest=\"batch_norm\", action='store_false', help='batch normalization')\nparser.add_argument('--rnn_num_units', type=int, default=500, help='number of hidden units if the network is RNN')\nparser.add_argument('--equal_split', type=bool, default=False, help='use trainEqual.csv and valEqual.csv')\nparser.add_argument('--forward_cnt', type=int, default=1, help='if forward pass is nondeterministic, then how many forward passes are averaged')\n\nparser.set_defaults(batch_norm=True)\nargs = parser.parse_args()\nprint args\n\nif (args.equal_split):\n train_listfile = open(\"/mnt/hdd615/Hrayr/Spoken-language-identification/trainEqual.csv\", \"r\")\n test_listfile = open(\"/mnt/hdd615/Hrayr/Spoken-language-identification/valEqual.csv\", \"r\")\nelse:\n train_listfile = open(\"/mnt/hdd615/Hrayr/Spoken-language-identification/trainingDataNew.csv\", \"r\")\n test_listfile = open(\"/mnt/hdd615/Hrayr/Spoken-language-identification/valDataNew.csv\", \"r\")\n\ntrain_list_raw = train_listfile.readlines()\ntest_list_raw = test_listfile.readlines()\n\nprint \"==> %d training examples\" % len(train_list_raw)\nprint \"==> %d validation examples\" % len(test_list_raw)\n\ntrain_listfile.close()\ntest_listfile.close()\n\nargs_dict = dict(args._get_kwargs())\nargs_dict['train_list_raw'] = train_list_raw\nargs_dict['test_list_raw'] = test_list_raw\nargs_dict['png_folder'] = \"/mnt/hdd615/Hrayr/Spoken-language-identification/train/png/\"\n \n\n\nprint \"==> using network %s\" % args.network\nnetwork_module = importlib.import_module(\"networks.\" + args.network)\nnetwork = network_module.Network(**args_dict)\n\n\nnetwork_name = 
args.prefix + '%s.bs%d%s%s' % (\n network.say_name(),\n args.batch_size, \n \".bn\" if args.batch_norm else \"\", \n (\".d\" + str(args.dropout)) if args.dropout>0 else \"\")\n \nprint \"==> network_name:\", network_name\n\n\nstart_epoch = 0\nif args.load_state != \"\":\n start_epoch = network.load_state(args.load_state) + 1\n\ndef do_epoch(mode, epoch):\n # mode is 'train' or 'test' or 'predict'\n y_true = []\n y_pred = []\n avg_loss = 0.0\n prev_time = time.time()\n\n batches_per_epoch = network.get_batches_per_epoch(mode)\n all_prediction = []\n\n for i in range(0, batches_per_epoch):\n step_data = network.step(i, mode)\n prediction = step_data[\"prediction\"]\n answers = step_data[\"answers\"]\n current_loss = step_data[\"current_loss\"]\n log = step_data[\"log\"]\n \n avg_loss += current_loss\n if (mode == \"predict\" or mode == \"predict_on_train\"):\n all_prediction.append(prediction)\n for pass_id in range(args.forward_cnt-1):\n step_data = network.step(i, mode)\n prediction += step_data[\"prediction\"]\n current_loss += step_data[\"current_loss\"]\n prediction /= args.forward_cnt\n current_loss /= args.forward_cnt\n \n for x in answers:\n y_true.append(x)\n \n for x in prediction.argmax(axis=1):\n y_pred.append(x)\n \n if ((i + 1) % args.log_every == 0):\n cur_time = time.time()\n print (\" %sing: %d.%d / %d \\t loss: %3f \\t avg_loss: %.5f \\t %s \\t time: %.2fs\" % \n (mode, epoch, (i + 1) * args.batch_size, batches_per_epoch * args.batch_size, \n current_loss, avg_loss / (i + 1), log, cur_time - prev_time))\n prev_time = cur_time\n \n \n #print \"confusion matrix:\"\n #print metrics.confusion_matrix(y_true, y_pred)\n accuracy = sum([1 if t == p else 0 for t, p in zip(y_true, y_pred)])\n print \"accuracy: %.2f percent\" % (accuracy * 100.0 / batches_per_epoch / args.batch_size)\n \n if (mode == \"predict\"):\n all_prediction = np.vstack(all_prediction)\n pred_filename = \"predictions/\" + (\"equal_split.\" if args.equal_split else \"\") + \\\n args.load_state[args.load_state.rfind('/')+1:] + \".csv\"\n with open(pred_filename, 'w') as pred_csv:\n for x in all_prediction:\n print >> pred_csv, \",\".join([(\"%.6f\" % prob) for prob in x])\n \n return avg_loss / batches_per_epoch\n\n\nif args.mode == 'train':\n print \"==> training\" \t\n for epoch in range(start_epoch, args.epochs):\n do_epoch('train', epoch)\n test_loss = do_epoch('test', epoch)\n state_name = 'states/%s.epoch%d.test%.5f.state' % (network_name, epoch, test_loss)\n print \"==> saving ... 
%s\" % state_name\n network.save_params(state_name, epoch)\n \nelif args.mode == 'test':\n do_epoch('predict', 0)\nelif args.mode == 'test_on_train':\n do_epoch('predict_on_train', 0)\nelse:\n raise Exception(\"unknown mode\")" }, { "alpha_fraction": 0.45289263129234314, "alphanum_fraction": 0.47531232237815857, "avg_line_length": 25.577075958251953, "blob_id": "1a42ef9ddc2a5568d3d724af16c12108075a93d8", "content_id": "ed9304660ee4a2e9e755a4b11bb75033bdcf0d45", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 26896, "license_type": "permissive", "max_line_length": 86, "num_lines": 1012, "path": "/code/audioSR/HTK/htk/HTKLib/HLM.c", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "/* ----------------------------------------------------------- */\n/* */\n/* ___ */\n/* |_| | |_/ SPEECH */\n/* | | | | \\ RECOGNITION */\n/* ========= SOFTWARE */ \n/* */\n/* */\n/* ----------------------------------------------------------- */\n/* developed at: */\n/* */\n/* Speech Vision and Robotics group */\n/* Cambridge University Engineering Department */\n/* http://svr-www.eng.cam.ac.uk/ */\n/* */\n/* Entropic Cambridge Research Laboratory */\n/* (now part of Microsoft) */\n/* */\n/* ----------------------------------------------------------- */\n/* Copyright: Microsoft Corporation */\n/* 1995-2000 Redmond, Washington USA */\n/* http://www.microsoft.com */\n/* */\n/* 2001-2002 Cambridge University */\n/* Engineering Department */\n/* */\n/* Use of this software is governed by a License Agreement */\n/* ** See the file License for the Conditions of Use ** */\n/* ** This banner notice must not be removed ** */\n/* */\n/* ----------------------------------------------------------- */\n/* File: HLM.c language model handling */\n/* ----------------------------------------------------------- */\n\nchar *hlm_version = \"!HVER!HLM: 3.4.1 [CUED 12/03/09]\";\nchar *hlm_vc_id = \"$Id: HLM.c,v 1.1.1.1 2006/10/11 09:54:57 jal58 Exp $\";\n\n#include \"HShell.h\"\n#include \"HMem.h\"\n#include \"HMath.h\"\n#include \"HWave.h\"\n#include \"HLabel.h\"\n#include \"HLM.h\"\n\n/* --------------------------- Trace Flags ------------------------- */\n\n#define T_TIO 1 /* Progress tracing whilst performing IO */\n\nstatic int trace=0;\n\n/* --------------------------- Initialisation ---------------------- */\n\n#define LN10 2.30258509299404568 /* Defined to save recalculating it */\n\nstatic Boolean rawMITFormat = FALSE; /* Don't use HTK quoting and escapes */\n\n\nstatic ConfParam *cParm[MAXGLOBS]; /* config parameters */\nstatic int nParm = 0;\n\n/* EXPORT->InitLM: initialise configuration parameters */\nvoid InitLM(void)\n{\n Boolean b;\n int i;\n\n Register(hlm_version,hlm_vc_id);\n nParm = GetConfig(\"HLM\", TRUE, cParm, MAXGLOBS);\n if (nParm>0){\n if (GetConfInt(cParm,nParm,\"TRACE\",&i)) trace = i;\n if (GetConfBool(cParm,nParm,\"RAWMITFORMAT\",&b)) rawMITFormat = b;\n }\n}\n\n/*------------------------- Input Scanner ---------------------------*/\n\nstatic Source source; /* input file */\n\n/* GetInLine: read a complete line from source */\nstatic char *GetInLine(char *buf)\n{\n int i, c;\n\n c = GetCh(&source);\n if (c==EOF)\n return NULL;\n i = 0;\n while (c!='\\n' && i<MAXSTRLEN) { \n buf[i++] = c;\n c = GetCh(&source);\n } \n buf[i] = '\\0';\n return buf;\n}\n\n/* SyncStr: read input until str found */\nstatic void SyncStr(char *buf,char 
*str)\n{\n while (strcmp(buf, str)!=0) {\n if (GetInLine(buf)==NULL)\n HError(8150,\"SyncStr: EOF searching for %s\", str);\n }\n}\n\n/* GetInt: read int from input stream */\nstatic int GetInt(void)\n{\n int x;\n char buf[100];\n \n if (!ReadInt(&source,&x,1,FALSE))\n HError(8150,\"GetInt: Int Expected at %s\",SrcPosition(source,buf));\n return x;\n}\n\n/* GetFLoat: read float from input stream */\nstatic float GetFloat(Boolean bin)\n{\n float x;\n char buf[100];\n\n if (!ReadFloat(&source,&x,1,bin))\n HError(8150,\"GetFloat: Float Expected at %s\",SrcPosition(source,buf));\n return x;\n}\n\n/* ReadLMWord: read a string from input stream */\nstatic char *ReadLMWord(char *buf)\n{\n int i, c;\n \n if (rawMITFormat) {\n while (isspace(c=GetCh(&source)));\n i=0;\n while (!isspace(c) && c!=EOF && i<MAXSTRLEN){\n buf[i++] = c; c=GetCh(&source);\n }\n buf[i] = '\\0';\n UnGetCh(c,&source);\n if (i>0)\n return buf;\n else\n return NULL;\n }\n else {\n if (ReadString(&source,buf))\n return buf;\n else\n return NULL;\n }\n}\n\n/*------------------------- NEntry handling ---------------------------*/\n\nstatic int hvs[]= { 165902236, 220889002, 32510287, 117809592,\n 165902236, 220889002, 32510287, 117809592 };\n\n/* EXPORT->GetNEntry: Access specific NGram entry indexed by ndx */\nNEntry *GetNEntry(NGramLM *nglm,lmId ndx[NSIZE],Boolean create)\n{\n NEntry *ne;\n unsigned int hash;\n int i;\n /* #define LM_HASH_CHECK */\n \n hash=0;\n for (i=0;i<NSIZE-1;i++)\n hash=hash+(ndx[i]*hvs[i]);\n hash=(hash>>7)&(nglm->hashsize-1);\n \n for (ne=nglm->hashtab[hash]; ne!=NULL; ne=ne->link) {\n if (ne->word[0]==ndx[0]\n#if NSIZE > 2\n && ne->word[1]==ndx[1]\n#endif\n#if NSIZE > 3\n && ne->word[2]==ndx[2]\n#endif\n#if NSIZE > 4\n && ne->word[3]==ndx[3]\n#endif\n )\n break;\n }\n\n if (ne==NULL && create) {\n ne=(NEntry *) New(nglm->heap,sizeof(NEntry));\n nglm->counts[0]++;\n \n for (i=0;i<NSIZE-1;i++)\n ne->word[i]=ndx[i];\n ne->user=0;\n ne->nse=0;\n ne->se=NULL;;\n ne->bowt=0.0;\n ne->link=nglm->hashtab[hash];\n nglm->hashtab[hash]=ne;\n }\n\n return(ne);\n}\n\nstatic int se_cmp(const void *v1,const void *v2)\n{\n SEntry *s1,*s2;\n\n s1=(SEntry*)v1;s2=(SEntry*)v2;\n return((int)(s1->word-s2->word));\n}\n\n/*--------------------- ARPA-style NGrams ------------------------*/\n\nstatic int nep_cmp(const void *v1,const void *v2)\n{\n NEntry *n1,*n2;\n int res,i;\n\n res=0; n1=*((NEntry**)v1); n2=*((NEntry**)v2);\n for(i=NSIZE-2;i>=0;i--)\n if (n1->word[i]!=n2->word[i]) {\n res=(n1->word[i]-n2->word[i]);\n break;\n }\n return(res);\n}\n\n\n/* WriteNGram: Write n grams to file */\nstatic int WriteNGrams(FILE *file,NGramLM *nglm,int n,float scale)\n{\n NEntry *ne,*be,*ce,**neTab;\n SEntry *se;\n LogFloat prob;\n lmId ndx[NSIZE+1];\n int c,i,j,k,N,g=1,hash,neCnt,total;\n\n if (trace&T_TIO)\n printf(\"\\nn%1d \",n),fflush(stdout);\n fprintf(file,\"\\n\\\\%d-grams:\\n\",n);\n N=VectorSize(nglm->unigrams);\n\n neTab=(NEntry **) New(&gstack,sizeof(NEntry*)*nglm->counts[0]);\n\n for (hash=neCnt=0;hash<nglm->hashsize;hash++)\n for (ne=nglm->hashtab[hash]; ne!=NULL; ne=ne->link) {\n for (i=1,ce=ne;i<n;i++)\n if (ne->word[i-1]==0) {\n ce=NULL;\n break;\n }\n if (ce!=NULL)\n for (i=n;i<NSIZE;i++)\n if (ne->word[i-1]!=0) {\n ce=NULL;\n break;\n }\n if (ce!=NULL && ce->nse>0)\n neTab[neCnt++]=ce;\n }\n qsort(neTab,neCnt,sizeof(NEntry*),nep_cmp);\n\n total=0;\n for (c=n;c<=NSIZE;c++) ndx[c]=0;\n for (j=0;j<neCnt;j++) {\n ne=neTab[j];\n for (c=1;c<n;c++) ndx[c]=ne->word[c-1];\n if (ne!=NULL && ne->nse>0) {\n for 
(i=0,se=ne->se;i<ne->nse;i++,se++) {\n if (trace&T_TIO) {\n if ((g%25000)==0)\n printf(\". \"),fflush(stdout);\n if ((g%800000)==0)\n printf(\"\\n \"),fflush(stdout);\n g++;\n }\n ndx[0]=se->word;\n\n if (n<nglm->nsize) be=GetNEntry(nglm,ndx,FALSE);\n else be=NULL;\n if (be==NULL || be->nse==0) be=NULL;\n total++;\n if (n==1) prob=nglm->unigrams[se->word];\n else prob=se->prob;\n if (prob*scale<-99.999)\n fprintf(file,\"%+6.3f\",-99.999);\n else\n fprintf(file,\"%+6.4f\",prob*scale);\n c='\\t';\n for (k=n-1;k>=0;k--)\n if (rawMITFormat)\n fprintf(file,\"%c%s\",c,nglm->wdlist[ndx[k]]->name),c=' ';\n else\n fprintf(file,\"%c%s\",c,\n ReWriteString(nglm->wdlist[ndx[k]]->name,\n NULL,ESCAPE_CHAR)),c=' ';\n if (be!=NULL)\n fprintf(file,\"\\t%+6.4f\\n\",be->bowt*scale);\n else\n fprintf(file,\"\\n\");\n }\n }\n\n }\n Dispose(&gstack,neTab);\n if (trace&T_TIO)\n printf(\"\\n\"),fflush(stdout);\n return(total);\n}\n\n#define PROGRESS(g) \\\n if (trace&T_TIO) { \\\n if ((g%25000)==0) \\\n printf(\". \"),fflush(stdout); \\\n if ((g%800000)==0) \\\n printf(\"\\n \"),fflush(stdout); \\\n }\n\n\n#define NGHSIZE1 8192\n#define NGHSIZE2 32768\n#define NGHSIZE3 131072\n\n/* EXPORT->CreateBoNGram: Allocate and create basic NGram structures */\nNGramLM *CreateBoNGram(LModel *lm,int vocSize, int counts[NSIZE])\n{\n lmId ndx[NSIZE];\n int i,k;\n NGramLM *nglm;\n\n nglm = (NGramLM *) New(lm->heap, sizeof(NGramLM));\n lm->data.ngram = nglm;\n nglm->heap = lm->heap;\n\n for (i=0;i<=NSIZE;i++) nglm->counts[i]=0;\n for (i=1;i<=NSIZE;i++)\n if (counts[i]==0) break;\n else nglm->counts[i]=counts[i];\n nglm->nsize=i-1;\n\n /* Don't count final layer */\n for (k=0,i=1;i<nglm->nsize;i++) \n k+=nglm->counts[i];\n /* Then use total to guess NEntry hash size */\n if (k<25000) \n nglm->hashsize=NGHSIZE1;\n else if (k<250000) \n nglm->hashsize=NGHSIZE2;\n else \n nglm->hashsize=NGHSIZE3;\n\n nglm->hashtab=(NEntry **) New(lm->heap,sizeof(NEntry*)*nglm->hashsize);\n for (i=0; i<nglm->hashsize; i++) \n nglm->hashtab[i]=NULL;\n\n nglm->vocSize = vocSize;\n nglm->unigrams = CreateVector(lm->heap,nglm->vocSize);\n nglm->wdlist = (LabId *) New(lm->heap,nglm->vocSize*sizeof(LabId)); nglm->wdlist--;\n for (i=1;i<=nglm->vocSize;i++) nglm->wdlist[i]=NULL;\n\n for (i=0;i<NSIZE;i++) ndx[i]=0;\n GetNEntry(nglm,ndx,TRUE);\n\n return(nglm);\n} \n\n#define BIN_ARPA_HAS_BOWT 1\n#define BIN_ARPA_INT_LMID 2\n\n/* ReadNGrams: read n grams list from file */\nstatic int ReadNGrams(NGramLM *nglm,int n,int count, Boolean bin)\n{\n float prob;\n LabId wdid;\n SEntry *cse;\n char wd[255];\n lmId ndx[NSIZE+1];\n NEntry *ne,*le=NULL;\n int i, g, idx, total;\n unsigned char size, flags=0;\n\n cse = (SEntry *) New(nglm->heap,count*sizeof(SEntry));\n for (i=1;i<=NSIZE;i++) ndx[i]=0;\n\n if (trace&T_TIO)\n printf(\"\\nn%1d \",n),fflush(stdout);\n\n total=0;\n for (g=1; g<=count; g++){\n PROGRESS(g);\n\n if (bin) {\n size = GetCh (&source);\n flags = GetCh (&source);\n }\n \n prob = GetFloat(bin)*LN10;\n\n if (n==1) { /* unigram treated as special */\n ReadLMWord(wd);\n wdid = GetLabId(wd, TRUE);\n if (wdid->aux != NULL)\n HError(8150,\"ReadNGrams: Duplicate word (%s) in 1-gram list\",\n wdid->name);\n wdid->aux = (Ptr)g;\n nglm->wdlist[g] = wdid;\n nglm->unigrams[g] = prob;\n ndx[0]=g;\n } else { /* bigram, trigram, etc. 
*/\n for (i=0;i<n;i++) {\n if (bin) {\n if (flags & BIN_ARPA_INT_LMID) {\n unsigned int ui;\n if (!ReadInt (&source, (int *) &ui, 1, bin))\n HError (9999, \"ReadNGrams: failed reading int lm word id\");\n idx = ui;\n }\n else {\n unsigned short us;\n if (!ReadShort (&source, (short *) &us, 1, bin))\n HError (9999, \"ReadNGrams: failed reading short lm word id at\");\n idx = us;\n }\n }\n else {\n ReadLMWord(wd);\n wdid = GetLabId(wd, FALSE);\n idx = (wdid==NULL?0:(int)wdid->aux);\n }\n if (idx<1 || idx>nglm->vocSize)\n HError(8150,\"ReadNGrams: Unseen word (%s) in %dGram\",wd,n);\n ndx[n-1-i]=idx;\n }\n }\n\n total++;\n ne = GetNEntry(nglm,ndx+1,FALSE);\n if (ne == NULL)\n HError(8150,\"ReadNGrams: Backoff weight not seen for %dth %dGram\",g,n);\n if (ne!=le) {\n if (le != NULL && ne->se != NULL)\n HError(8150,\"ReadNGrams: %dth %dGrams out of order\",g,n);\n if (le != NULL) {\n if (le->nse==0) {\n le->se=NULL;\n } else {\n qsort(le->se,le->nse,sizeof(SEntry),se_cmp);\n }\n }\n ne->se = cse;\n ne->nse = 0;\n le = ne;\n }\n cse->prob = prob;\n cse->word = ndx[0];\n ne->nse++; cse++;\n\n /* read back-off weight */\n if (bin) {\n if (flags & BIN_ARPA_HAS_BOWT) {\n ne = GetNEntry(nglm,ndx,TRUE);\n ne->bowt = GetFloat (TRUE)*LN10;\n }\n }\n else {\n SkipWhiteSpace(&source);\n if (!source.wasNewline) {\n ne=GetNEntry(nglm,ndx,TRUE);\n ne->bowt = GetFloat(FALSE)*LN10;\n }\n }\n }\n\n /* deal with the last accumulated set */\n if (le != NULL) {\n if (le->nse==0) {\n le->se=NULL;\n } else {\n qsort(le->se,le->nse,sizeof(SEntry),se_cmp);\n }\n }\n\n if (trace&T_TIO)\n printf(\"\\n\"),fflush(stdout);\n\n return(total);\n}\n\n/* ReadBoNGram: read and store WSJ/DP format ngram */\nstatic void ReadBoNGram(LModel *lm,char *fn)\n{\n NGramLM *nglm;\n int i,j,k,counts[NSIZE+1];\n Boolean ngBin[NSIZE+1];\n char buf[MAXSTRLEN+1],syc[64];\n char ngFmtCh;\n\n if (trace&T_TIO)\n printf(\"\\nBOffB \"),fflush(stdout);\n\n if(InitSource(fn,&source,LangModFilter)<SUCCESS)\n HError(8110,\"ReadBoNGram: Can't open file %s\", fn);\n GetInLine(buf);\n SyncStr(buf,\"\\\\data\\\\\");\n for (i=1;i<=NSIZE;i++) counts[i]=0;\n for (i=1;i<=NSIZE;i++) {\n GetInLine(buf);\n if (sscanf(buf, \"ngram %d%c%d\", &j, &ngFmtCh, &k)!=3 && i>1)\n break;\n if (i!=j || k==0) \n HError(8150,\"ReadBoNGram: %dGram count missing (%s)\",i,buf);\n\n switch (ngFmtCh) {\n case '=':\n ngBin[j] = FALSE;\n break;\n case '~':\n ngBin[j] = TRUE;\n break;\n default:\n HError (9999, \"ReadARPALM: unknown ngram format type '%c'\", ngFmtCh);\n }\n counts[j]=k;\n }\n\n if (ngBin[1])\n HError (8113, \"ReadARPALM: unigram must be stored as text\");\n\n nglm=CreateBoNGram(lm,counts[1],counts);\n for (i=1;i<=nglm->nsize;i++) {\n sprintf(syc,\"\\\\%d-grams:\",i);\n SyncStr(buf,syc);\n ReadNGrams(nglm,i,nglm->counts[i], ngBin[i]);\n }\n SyncStr(buf,\"\\\\end\\\\\");\n CloseSource(&source);\n\n if (trace&T_TIO) {\n printf(\"\\n NEntry==%d \",nglm->counts[0]);\n for(i=1;i<=nglm->nsize;i++)\n printf(\" %d-Grams==%d\",i,nglm->counts[i]);\n printf(\"\\n\\n\");\n fflush(stdout);\n }\n}\n/* WriteBoNGram: write out WSJ/DP format ngram */\nstatic void WriteBoNGram(LModel *lm,char *fn,int flags)\n{\n int i,k;\n FILE *file;\n NGramLM *nglm;\n Boolean isPipe;\n\n nglm = lm->data.ngram;\n file=FOpen(fn,LangModOFilter,&isPipe);\n fprintf(file,\"\\\\data\\\\\\n\");\n\n for (i=1;i<=nglm->nsize;i++) {\n fprintf(file,\"ngram %d=%d\\n\",i,nglm->counts[i]);\n }\n for (i=1;i<=nglm->nsize;i++) {\n k = WriteNGrams(file,nglm,i,1.0/LN10);\n if (k!=nglm->counts[i])\n 
HError(-8190,\"WriteBoNGram: Counts disagree for %dgram (%d vs %d)\",\n i, k, nglm->counts[i]);\n }\n fprintf(file,\"\\n\\\\end\\\\\\n\");\n FClose(file,isPipe);\n}\n\nvoid ClearBoNGram(LModel *lm)\n{\n NGramLM *nglm = lm->data.ngram;\n int i;\n \n for(i=1;i<=nglm->vocSize;i++)\n if (nglm->wdlist[i]!=NULL) nglm->wdlist[i]->aux=0;\n}\n\n/* -------------- Matrix Bigram Handling Routines ----------- */\n\nMatBiLM *CreateMatBigram(LModel *lm,int nw)\n{\n MatBiLM *matbi;\n \n matbi = (MatBiLM *) New(lm->heap,sizeof(MatBiLM));\n lm->data.matbi = matbi;\n matbi->heap = lm->heap;\n \n matbi->numWords = nw;\n matbi->wdlist = (LabId *) New(lm->heap,sizeof(LabId)*(nw+1));\n matbi->bigMat = CreateMatrix(lm->heap,nw,nw);\n ZeroMatrix(matbi->bigMat);\n return(matbi);\n}\n\n/* ReadRow: read a row from bigram file f into v */\nint ReadRow(Vector v)\n{\n int i,j,N,cnt,c;\n float x;\n\n N = VectorSize(v);\n i=0; \n while(!source.wasNewline) {\n x = GetFloat(FALSE);\n c=GetCh(&source);\n if (c == '*')\n cnt=GetInt();\n else {\n UnGetCh(c,&source);\n cnt=1;\n }\n SkipWhiteSpace(&source);\n for (j=0;j<cnt;j++) {\n i++;\n if (i<=N) v[i] = x;\n }\n }\n return(i);\n}\n\n/* ReadBigram: load a bigram from given file */\nstatic void ReadMatBigram(LModel *lm,char *fn)\n{\n Vector vec;\n char buf[132];\n int P,p,j;\n float sum,x;\n LabId id;\n MatBiLM *matbi;\n \n if (trace&T_TIO)\n printf(\"\\nMB \"),fflush(stdout);\n\n if(InitSource(fn,&source,LangModFilter)<SUCCESS)\n HError(8110,\"ReadMatBigram: Can't open file %s\", fn);\n vec = CreateVector(&gcheap,MAX_LMID);\n ReadLMWord(buf);SkipWhiteSpace(&source);\n id=GetLabId(buf,TRUE);\n P = ReadRow(vec);\n\n if (P<=0 || P >MAX_LMID)\n HError(8151,\"ReadMatBigram: First row invalid (%d entries)\",P);\n\n matbi=CreateMatBigram(lm,P);\n\n matbi->wdlist[1] = id;\n for (p=1;p<=P;p++) matbi->bigMat[1][p]=vec[p];\n id->aux=(Ptr) 1;\n Dispose(&gcheap,vec);\n\n for (sum=0.0, j=1; j<=P; j++) {\n x = matbi->bigMat[1][j];\n if (x<0)\n HError(8151,\"ReadMatBigram: In bigram, entry %d for %s is -ve (%e)\",\n j,buf,x);\n sum += x;\n matbi->bigMat[1][j]=((x<MINLARG)?LZERO:log(x));\n }\n if (sum < 0.99 || sum > 1.01)\n HError(-8151,\"ReadMatBigram: Row %d of bigram %s adds up to %f\",1,fn,sum);\n\n for (p=2; ReadLMWord(buf); p++) {\n if (trace&T_TIO) {\n if ((p%25)==0)\n printf(\". 
\"),fflush(stdout);\n if ((p%800)==0)\n printf(\"\\n \"),fflush(stdout);\n }\n if (p>P)\n HError(8150,\"ReadMatBigram: More rows than columns in bigram %s\",fn);\n id=GetLabId(buf,TRUE);\n if ((int)id->aux != 0) \n HError(8150,\"ReadMatBigram: Duplicated name %s in bigram %s\",buf,fn);\n id->aux = (Ptr) p;\n matbi->wdlist[p] = id;\n SkipWhiteSpace(&source);\n if (ReadRow(matbi->bigMat[p])!=P)\n HError(8150,\"ReadMatBigram: Wrong number of items in row %d\",p);\n for (sum=0.0, j=1; j<=P; j++) {\n x = matbi->bigMat[p][j];\n if (x<0)\n HError(8151,\"ReadMatBigram: In bigram, entry %d for %s is -ve (%e)\",\n j,buf,x);\n sum += x;\n matbi->bigMat[p][j]=((x<MINLARG)?LZERO:log(x));\n }\n if (sum < 0.99 || sum > 1.01)\n HError(-8151,\"ReadMatBigram: Row %d of bigram %s adds up to %f\",p,fn,sum);\n }\n if (P>p)\n HError(8150,\"ReadMatBigram: More columns than rows in bigram %s\",fn);\n if (trace&T_TIO)\n printf(\"\\n\"),fflush(stdout);\n CloseSource(&source);\n}\n\n/* WriteMatBigram: write out old HVite format bigram */\nstatic void WriteMatBigram(LModel *lm,char *fn,int flags)\n{\n const float epsilon = 0.000001;\n MatBiLM *matbi;\n FILE *file;\n Boolean isPipe;\n Vector v;\n double x,y;\n int i,j,rep;\n\n if (trace&T_TIO)\n printf(\"\\nMB \"),fflush(stdout);\n\n matbi = lm->data.matbi;\n file=FOpen(fn,LangModOFilter,&isPipe);\n\n for (i=1;i<=matbi->numWords;i++) {\n if (trace&T_TIO) {\n if ((i%25)==0)\n printf(\". \"),fflush(stdout);\n if ((i%800)==0)\n printf(\"\\n \"),fflush(stdout);\n }\n\n fprintf(file,\"%-8s \",ReWriteString(matbi->wdlist[i]->name,\n NULL,ESCAPE_CHAR));\n\n v=matbi->bigMat[i];rep=0;x=-1.0;\n for (j=1;j<=matbi->numWords;j++){\n y = L2F(v[j]);\n if (fabs(y - x) <= epsilon) rep++;\n else {\n if (rep>0) {\n fprintf(file,\"*%d\",rep+1);\n rep=0;\n }\n x = y;\n if (x == 0.0)\n fprintf(file,\" 0\");\n else if (x == 1.0)\n fprintf(file,\" 1\");\n else\n fprintf(file,\" %e\",x);\n }\n }\n if (rep>0)\n fprintf(file,\"*%d\",rep+1);\n fprintf(file,\"\\n\");\n }\n FClose(file,isPipe);\n if (trace&T_TIO)\n printf(\"\\n\"),fflush(stdout);\n}\n\n/*------------------------- User Interface --------------------*/\n\n/* EXPORT GetLMProb: return probability of word wd_id following pr_id[] */\nfloat GetLMProb(LModel *lm, LabId prid[NSIZE], LabId wdid)\n{\n LabId cpid[NSIZE];\n NEntry *ne;\n SEntry *se;\n lmId p, q, word, ndx[NSIZE];\n LogFloat bowt,prob;\n int i, s;\n \n switch (lm->type) {\n case boNGram:\n word = (int)wdid->aux;\n if (word==0 || word>lm->data.ngram->vocSize)\n return(LZERO);\n for (s=-1,i=0;i<NSIZE;i++)\n if (prid[i]!=NULL) \n ndx[i]=(int)prid[i]->aux, cpid[i]=prid[i], s=i;\n else\n ndx[i]=0, cpid[i]=NULL;\n\n /* If no answer back-off to unigram */\n if (s<0) {\n if (word!=0)\n return(lm->data.ngram->unigrams[word]);\n else\n return(log(1.0/lm->data.ngram->vocSize));\n }\n\n cpid[s]=0;\n ne = GetNEntry(lm->data.ngram,ndx,FALSE);\n if (ne) {\n /* Replace with bsearch equivalent */\n for (i=0, se=ne->se; i<ne->nse; i++,se++)\n if (se->word==word) \n return(se->prob); /* Ngram found */\n bowt=ne->bowt;\n }\n else {\n bowt=0.0;\n }\n \n if (s==0)\n return(lm->data.ngram->unigrams[word]+bowt); /* Backoff to unigram */\n else\n return(bowt+GetLMProb(lm,cpid,wdid)); /* else recurse */\n break;\n case matBigram:\n p=(int) prid[0]->aux;\n q=(int) wdid->aux;\n return(lm->data.matbi->bigMat[p][q]);\n default:\n prob=LZERO;\n }\n return(prob);\n}\n\n/* EXPORT ReadLModel: Determine LM type and then read-in */\nLModel *ReadLModel(MemHeap *heap,char *fn)\n{\n LModel *lm;\n LMType type;\n char 
buf[MAXSTRLEN+1];\n int i;\n\n lm=(LModel*)New(heap,sizeof(LModel));\n lm->heap=heap;\n lm->name=CopyString(heap,fn);\n\n if(InitSource(fn,&source,LangModFilter)<SUCCESS)\n HError(8110,\"ReadLModel: Can't open file %s\", fn);\n type=boNGram;i=0;\n do {\n if (i++==1000) {\n type=matBigram;\n break;\n }\n GetInLine(buf);\n }\n while (strcmp(buf, \"\\\\data\\\\\")!=0);\n CloseSource(&source);\n\n lm->type=type;\n switch(type) {\n case boNGram:\n ReadBoNGram(lm,fn);\n break;\n case matBigram:\n ReadMatBigram(lm,fn);\n break;\n }\n return(lm);\n}\n\n\n/* EXPORT WriteLModel: Determine LM type and then write-out */\nvoid WriteLModel(LModel *lm,char *fn,int flags)\n{\n switch(lm->type) {\n case boNGram:\n WriteBoNGram(lm,fn,flags);\n break;\n case matBigram:\n WriteMatBigram(lm,fn,flags);\n break;\n }\n}\n\nvoid ClearLModel(LModel *lm)\n{\n switch(lm->type) {\n case boNGram:\n ClearBoNGram(lm);\n break;\n case matBigram:\n break;\n }\n}\n\n/*----------------------------------------------------------------------*/\n\n#ifndef NO_LAT_LM\n/* FindSEntry\n\n find SEntry for wordId in array using binary search\n*/\nstatic SEntry *FindSEntry (SEntry *se, lmId pronId, int l, int h)\n{\n /*#### here l,h,c must be signed */\n int c;\n\n while (l <= h) {\n c = (l + h) / 2;\n if (se[c].word == pronId) \n return &se[c];\n else if (se[c].word < pronId)\n l = c + 1;\n else\n h = c - 1;\n }\n\n return NULL;\n}\n\n/* LMTransProb_ngram\n\n return logprob of transition from src labelled word. Also return dest state.\n ngram case\n*/\nLogFloat LMTrans (LModel *lm, LMState src, LabId wdid, LMState *dest)\n{\n NGramLM *nglm;\n LogFloat lmprob;\n lmId hist[NSIZE] = {0}; /* initialise whole array to zero! */\n int i, l;\n NEntry *ne;\n SEntry *se;\n lmId word;\n\n assert (lm->type == boNGram);\n nglm = lm->data.ngram;\n\n word = (int) wdid->aux;\n\n if (word==0 || word>lm->data.ngram->vocSize) {\n HError (-9999, \"word %d not in LM wordlist\", word);\n *dest = NULL;\n return (LZERO);\n }\n\n ne = src;\n \n if (!src) { /* unigram case */\n lmprob = nglm->unigrams[word];\n }\n else {\n /* lookup prob p(word | src) */\n /* try to find pronid in SEntry array */\n se = FindSEntry (ne->se, word, 0, ne->nse - 1);\n\n assert (!se || (se->word == word));\n\n if (se) /* found */\n lmprob = se->prob;\n else { /* not found */\n lmprob = 0.0;\n l = 0;\n hist[NSIZE-1] = 0;\n for (i = 0; i < NSIZE-1; ++i) {\n hist[i] = ne->word[i];\n if (hist[i] != 0)\n l = i;\n } /* l is now the index of the last (oldest) non zero element */\n \n for ( ; l > 0; --l) {\n if (ne)\n lmprob += ne->bowt;\n hist[l] = 0; /* back-off: discard oldest word */\n ne = GetNEntry (nglm, hist, FALSE);\n if (ne) { /* skip over non existing hists. 
fix for weird LMs */\n /* try to find pronid in SEntry array */\n se = FindSEntry (ne->se, word, 0, ne->nse - 1);\n assert (!se || (se->word == word));\n if (se) { /* found it */\n lmprob += se->prob;\n l = -1;\n break;\n }\n }\n }\n if (l == 0) { /* backed-off all the way to unigram */\n assert (!se);\n lmprob += ne->bowt;\n lmprob += nglm->unigrams[word];\n }\n }\n }\n\n\n /* now determine dest state */\n if (src) {\n ne = (NEntry *) src;\n \n l = 0;\n hist[NSIZE-1] = 0;\n for (i = 1; i < NSIZE-1; ++i) {\n hist[i] = ne->word[i-1];\n if (hist[i] != 0)\n l = i;\n } /* l is now the index of the last (oldest) non zero element */\n }\n else {\n for (i = 1; i < NSIZE-1; ++i)\n hist[i] = 0;\n l = 1;\n }\n\n hist[0] = word;\n\n ne = (LMState) GetNEntry (nglm, hist, FALSE);\n for ( ; !ne && (l > 0); --l) {\n hist[l] = 0; /* back off */\n ne = (LMState) GetNEntry (nglm, hist, FALSE);\n }\n /* if we left the loop because l=0, then ne is still NULL, which is what we want */\n\n *dest = ne;\n\n#if 0\n printf (\"lmprob = %f dest %p\\n\", lmprob, *dest);\n#endif\n\n return (lmprob);\n}\n#endif\n\n\n/* ------------------------- End of HLM.c ------------------------- */\n" }, { "alpha_fraction": 0.43006080389022827, "alphanum_fraction": 0.43788009881973267, "avg_line_length": 30.108108520507812, "blob_id": "51b84cc0d719bd7ef76cff19e49d51e1c03fb41c", "content_id": "343530a295ce4f3aaad50427888e46a1888481b5", "detected_licenses": [ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2302, "license_type": "permissive", "max_line_length": 115, "num_lines": 74, "path": "/code/audioSR/HTK/htk/HLMLib/Makefile", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------- \n# \n# ___ \n# |_| | |_/ SPEECH \n# | | | | \\ RECOGNITION \n# ========= SOFTWARE \n# \n# \n# ----------------------------------------------------------- \n# Copyright: Cambridge University\n# 1995-2005 Engineering Department\n# http://htk.eng.cam.ac.uk\n# http://mi.eng.cam.ac.uk\n# \n# Use of this software is governed by a License Agreement \n# ** See the file License for the Conditions of Use ** \n# ** This banner notice must not be removed ** \n# \n# ----------------------------------------------------------- \n# File: HLMLib/Makefile. Generated from Makefile.in by configure.\n# ----------------------------------------------------------- \n\nSHELL = /bin/sh\nsrcdir = .\ntop_srcdir = ..\n\nprefix = /usr/local\nexec_prefix = ${prefix}\nbindir = ${exec_prefix}/bin\nsbindir = ${exec_prefix}/sbin\nlibexecdir = ${exec_prefix}/libexec\ndatadir = ${prefix}/share\nsysconfdir = ${prefix}/etc\nsharedstatedir = ${prefix}/com\nlocalstatedir = ${prefix}/var\nlibdir = ${exec_prefix}/lib\ninfodir = ${prefix}/share/info\nmandir = ${prefix}/share/man\nincludedir = ${prefix}/include\noldincludedir = /usr/include\n\nCC = gcc\nCFLAGS = -m32 -ansi -D_SVID_SOURCE -DOSS_AUDIO -D'ARCH=\"x86_64\"' -Wall -Wno-switch -g -O2 -DSANITY -I. 
-I../HTKLib/\nRANLIB = ranlib\nINSTALL = /usr/bin/install -c\ntarget = HLMLib.a\nmodules = LModel.o LPMerge.o LPCalc.o LUtil.o LWMap.o LCMap.o LGBase.o\n\nall: $(target)\n\n$(target): $(modules)\n\tif [ -f $@ ] ; then /bin/rm $@ ; fi\n\tar rv $@ $^\n\t$(RANLIB) $@\n\n.PHONY: clean cleanup depend mkinstalldir install\n\nclean:\n\t-rm -f $(modules) $(target)\n\ncleanup:\n\t-rm -f $(modules)\n\ndepend:\n\t-makedepend -Y *.c\n\ndistclean: clean\n\t-rm -f Makefile\n\ninstall: $(target) mkinstalldir \n\t$(INSTALL) -m 755 $< $(libdir)\n\nmkinstalldir:\n\t-mkdir -p $(libdir)\n" }, { "alpha_fraction": 0.6219995617866516, "alphanum_fraction": 0.6487163305282593, "avg_line_length": 32.978721618652344, "blob_id": "664b3bb2d49f4288d0666cefd6e27422d86d5138", "content_id": "9f91c71c0025910599b85d9c318f6a5f3e5f29b9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4791, "license_type": "permissive", "max_line_length": 134, "num_lines": 141, "path": "/code/audioSR/Spoken-language-identification-master/ensembling/ensemble.theano.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" Usage: python ensemble.theano.py model1 [another_model]*\n \nfor GPU mode\n 1. export PATH=$PATH:/usr/local/cuda-6.5/bin\n 2. THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32,nvcc.flags='-arch=sm_30' python ensemble.theano.py model1 [another_model]*\n\"\"\"\n\nimport cPickle as pickle\nimport sys\nimport caffe\nimport numpy as np\n\ncaffe.set_mode_gpu()\n\ndef get_score(probs, label):\n pred = sorted([(x, it) for it, x in enumerate(probs)], reverse=True)\n if (pred[0][1] == label):\n return 1000\n if (pred[1][1] == label):\n return 400\n if (pred[2][1] == label): \n return 160\n return 0\n \ndef get_full_score(preds, labels):\n topCoderScore = 0.0\n for i in range(len(labels)):\n topCoderScore += get_score(preds[i], labels[i]) \n \n return topCoderScore / len(labels) * 3520\n\n####################### COLLECTING INFO ABOUT LANGS ############################\nfile = open('../trainingData.csv')\ndata = file.readlines()[1:]\nlangs = set()\nfor line in data:\n filepath, language = line.split(',')\n language = language.strip()\n langs.add(language)\nlangs = sorted(langs)\nfile.close()\n\nn_models = len(sys.argv) - 1\nX = np.zeros((12320, n_models * 176), dtype=np.float32)\nfor iter in range(n_models):\n csvpath = 'probs/val/' + sys.argv[iter + 1]\n csv = open(csvpath, 'r')\n for row_id, line in enumerate(csv.readlines()):\n mas = line.split(',')\n mas = np.array([float(x) for x in mas], dtype=np.float32)\n X[row_id, 176*iter:176*(iter+1)] = mas\n csv.close()\n \nY = []\nlabel_file = open('../valEqual.csv')\nfor line in label_file.readlines():\n Y.append(int(line.split(',')[1]))\nlabel_file.close()\n\nprint \"X.shape =\", X.shape\nprint \"len(Y) =\", len(Y)\n\nfor iter in range(n_models):\n print \"score of model %d = %f\" % (iter+1, get_full_score(X[:, 176*iter:176*(iter+1)], Y))\n\n\n######################### TRAINING ENSEMBLING MODEL ############################\nimport theano\nimport theano.tensor as T\nimport lasagne\nimport lasagne.layers as layers\n\nn_train_examples = 10000\nX = X.astype(theano.config.floatX)\ntrainX = X[:n_train_examples]\ntrainY = Y[:n_train_examples]\nvalX = X[n_train_examples:]\nvalY = Y[n_train_examples:]\n\ninput_var = T.matrix('X')\ntarget_var = T.ivector('y')\n\nfrom lasagne.nonlinearities import softmax, sigmoid, rectify\nnetwork = lasagne.layers.InputLayer((None, X.shape[1]), input_var)\nnetwork = 
lasagne.layers.DenseLayer(network, 4000, nonlinearity=rectify)\nnetwork = lasagne.layers.DenseLayer(lasagne.layers.dropout(network, 0.5), 176, nonlinearity=softmax)\n\nprediction = lasagne.layers.get_output(network)\nloss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\nloss = loss.mean() + 0 * lasagne.regularization.regularize_network_params(\n network, lasagne.regularization.l2)\n\nparams = lasagne.layers.get_all_params(network, trainable=True)\nlearning_rate = theano.shared(np.float32(0.2))\nupdates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=learning_rate, momentum=0.9)\ntrain_fn = theano.function([input_var, target_var], loss, updates=updates)\nvalidation_fn = theano.function([input_var, target_var], loss)\n\nfor epoch in range(1000):\n train_loss = train_fn(trainX, trainY)\n val_loss = validation_fn(valX, valY)\n print \"Epoch %d: train_loss = %f, val_loss = %f, lr = %f\" % (epoch + 1, train_loss, val_loss, learning_rate.get_value())\n if (epoch > 0 and epoch % 200 == 0):\n learning_rate.set_value(np.float32(learning_rate.get_value() * 0.7))\n \ntest_prediction = lasagne.layers.get_output(network, deterministic=True)\npredict_fn = theano.function([input_var], test_prediction)\nall_predictions = predict_fn(valX)\n\nscore = 0.0\nfor probs, label in zip(all_predictions, valY):\n score += get_score(probs, label)\nprint \"Final score on ensembling validaion = %f\" % score\nprint \"Expected score = %f\" % (score / len(valY) * 3520)\n\n\nprint \"\\n\\n==> creating submission...\"\nX = np.zeros((12320, n_models * 176), dtype=np.float32)\nfor iter in range(n_models):\n csvpath = 'probs/test/' + sys.argv[iter + 1]\n csv = open(csvpath, 'r')\n for row_id, line in enumerate(csv.readlines()):\n mas = line.split(',')\n mas = np.array([float(x) for x in mas], dtype=np.float32)\n X[row_id, 176*iter:176*(iter+1)] = mas\n csv.close()\n\nprediction = predict_fn(X)\nprint \"prediction.shape =\", prediction.shape\nensembled = open('ensembled.csv', 'w')\nfor probs in prediction:\n out = [str(x) for x in probs]\n ensembled.write(','.join(out) + '\\n')\n\n\n\"\"\"\n######################### SAVING MODEL TO BE ABLE TO REPRODUCE #################\nprint \"==> Saving model...\"\nwith open(\"model.pickle\", 'w') as save_file:\n\tpickle.dump(obj = {'params' : layers.get_all_param_values(network)}, file = save_file, protocol = -1)\n\"\"\"\n" }, { "alpha_fraction": 0.5799423456192017, "alphanum_fraction": 0.5887975096702576, "avg_line_length": 40.363372802734375, "blob_id": "8ecabd099caf0a184affce30911075bcdba10592", "content_id": "6a75c2d521c791349ec153f67a11464d0cc47506", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 14229, "license_type": "permissive", "max_line_length": 188, "num_lines": 344, "path": "/code/audioSR/Preprocessing/TIMIT_utils_modified.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\nimport soundfile as sf\nfrom python_speech_features import mfcc\nimport pickle\nimport sys\nimport theano\nimport numpy as np\nimport scipy\nimport librosa\n\nTIMIT_original_dir = '/home/matthijs/TCDTIMIT/TIMIT/original'\nTIMIT_fixed_dir = '/home/matthijs/TCDTIMIT/TIMIT/fixed'\n\n# convert the 61 phonemes from TIMIT to the reduced set of 39 phonemes\ndef convert_phonemes(rootdir = TIMIT_fixed_dir):\n return 0\n\n\ndef get_data(rootdir = TIMIT_fixed_dir): \n inputs = []\n targets = []\n alphabet = {}\n\n # count number of files for showing 
progress.\n wavCounter = 0\n for root, dirs, files in os.walk(rootdir):\n for file in files:\n if file.endswith('.wav'):\n wavCounter += 1\n print \"There are \", wavCounter, \" files to be processed\"\n from progress_bar import show_progress\n processed = 0\n\n for dir_path, sub_dirs, files in os.walk(rootdir):\n for file in files:\n if (os.path.join(dir_path, file)).endswith('.wav'):\n\n ## Get the data itself: inputs and targets\n #--------------------------\n wav_file_name = os.path.join(dir_path, file)\n\n # from https://github.com/dtjchen/spoken-command-processor/blob/master/model/utils.py\n sampling_rate, frames = scipy.io.wavfile.read(wav_file_name)\n\n segment_duration_ms = 20\n n_fft = int((segment_duration_ms / 1000.) * sampling_rate)\n\n hop_duration_ms = 10\n hop_length = int((hop_duration_ms / 1000.) * sampling_rate)\n\n mfcc_count = 13\n\n mfccs = librosa.feature.mfcc(\n y=frames,\n sr=sampling_rate,\n n_mfcc=mfcc_count,\n hop_length=hop_length,\n n_fft=n_fft\n )\n mfcc_delta = librosa.feature.delta(mfccs)\n mfcc_delta2 = librosa.feature.delta(mfccs, order=2)\n #full_input = np.vstack([mfccs, mfcc_delta, mfcc_delta2])\n full_input = np.concatenate((mfccs, mfcc_delta, mfcc_delta2), axis=1)\n\n inputs.append(np.asarray(full_input, dtype=theano.config.floatX))\n\n #return mfccs_and_deltas, hop_length, n_fft\n\n ### OLD ###\n # wav_file_name = os.path.join(dir_path, file)\n # input_data, f_s = sf.read(wav_file_name)\n # mfcc_feat = mfcc(input_data,f_s)\n # #Delta features\n # delta_feat = mfcc_feat[:-1]-mfcc_feat[1:]\n # #Delta-Delta features\n # deltadelta_feat = delta_feat[:-1]-delta_feat[1:]\n #\n # #Removing the first two frames\n # mfcc_feat = mfcc_feat[2:]\n # delta_feat = delta_feat[1:]\n #\n # #Concatenating mfcc, delta and delta-delta features\n # full_input = np.concatenate((mfcc_feat,delta_feat,deltadelta_feat), axis=1)\n # inputs.append(np.asarray(full_input, dtype=theano.config.floatX))#Rakeshvar wants one frame along each column but i am using Lasagne\n\n text_file_name = wav_file_name[:-4] + '.txt'\n target_data_file = open(text_file_name)\n target_data = str(target_data_file.read()).lower().translate(None, '!:,\".;?')\n target_data = target_data[8:-1] #No '.' 
in lexfree dictionary\n targets.append(target_data)\n\n ## Get alphabet\n # ------------------------\n transcription_filename = os.path.join(dir_path, file)[:-4] + '.txt'\n transcription_file = open(transcription_filename, 'r')\n transcription = str(transcription_file.read()).lower().translate(None, '!:,\".;?')\n transcription = transcription[8:-1]\n\n # count number of occurences of each character\n for char in transcription:\n if not char in alphabet:\n alphabet.update({char: 1})\n else:\n alphabet[char] += 1\n\n processed += 1\n if (processed % 100 == 0):\n show_progress(float(processed) / wavCounter)\n print \" | Read\", processed, \"files out of\", wavCounter\n\n print 'TIMIT Alphabet:\\n', alphabet\n alphabet_filename = 'TIMIT_Alphabet.pkl'\n with open(alphabet_filename, 'wb') as f:\n pickle.dump(alphabet, f, protocol=2)\n\n return inputs, targets, alphabet\n\n\n\ndef get_TIMIT_targets_one_hot(inputs, targets, alphabet):\n list_of_alphabets = [key for key in alphabet]\n list_of_alphabets.sort()\n # print list_of_alphabets\n\n num_targets = len(list_of_alphabets)\n # print len(targets[0])\n # targets_as_alphabet_indices = [[seq.index(char) for char in seq] for seq in targets]\n one_hot_targets = [[np.zeros((num_targets)) for char in example] for example in targets]\n # print len(one_hot_targets[0]), one_hot_targets[0]#, len(one_hot_targets[0][0][0])\n for example_num in range(len(targets)):\n for char_num in range(len(targets[example_num])):\n # print targets[example_num][char_num]\n # print list_of_alphabets.index(targets[example_num][char_num])\n one_hot_targets[example_num][char_num][list_of_alphabets.index(targets[example_num][char_num])]=1\n return one_hot_targets\n\ndef get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet):\n list_of_alphabets = [key for key in alphabet]\n list_of_alphabets.sort()\n print('list of alphabets: {}'.format(list_of_alphabets))\n print len(list_of_alphabets)\n #print list_of_alphabets.index(22)\n print targets[0]\n targets_as_alphabet_indices = [[list_of_alphabets.index(char) for char in target] for target in targets]\n print \"Example target and alphabet indices: \"\n print 'target = {} \\n alphabet indices = {}'.format(targets[0], targets_as_alphabet_indices[0])\n\n return targets_as_alphabet_indices\n\ndef index2char_TIMIT(input_index_seq = None, TIMIT_pkl_file = os.path.join(os.getcwd(),'TIMIT_data_prepared_for_CTC.pkl')):\n with open(TIMIT_pkl_file,'rb') as f:\n data = pickle.load(f)\n list_of_alphabets = data['chars']\n blank_char = '_'\n list_of_alphabets.append(blank_char)\n output_character_seq = [list_of_alphabets[i] for i in input_index_seq]\n output_sentence = ''.join(output_character_seq)\n # for i in input_index_seq:\n # output_character_seq.append(list_of_alphabets[i])\n\n return output_sentence\n\ndef create_mask(TIMIT_pkl_file = os.path.join(os.getcwd(),'TIMIT_data_prepared_for_CLM.pkl')):\n with open(TIMIT_pkl_file,'rb') as f:\n data = pickle.load(f)\n x = data['x']\n max_seq_len = max([len(x[i]) for i in range(len(x))])\n mask = np.zeros((len(x),max_seq_len))\n for eg_num in range(len(x)):\n mask[eg_num , 0:len(x[eg_num])] = 1\n return mask \n\n\ndef prepare_TIMIT_for_CTC(dataset='train', savedir = os.getcwd(), test=0):\n print 'Getting: Inputs, Targets, Alphabet...'\n print \"#########################\"\n rootdir = os.path.join(TIMIT_fixed_dir,dataset)\n\n if (test):\n ### Read from pkl for faster testing\n in_file_name= savedir + '/TIMIT_data_prepared_for_CTC.pkl'\n with open(in_file_name, 'rb') as f:\n reclaimed_data = 
pickle.load(f)\n inputs = reclaimed_data['x']\n targets = reclaimed_data['y_char']\n targets_as_alphabet_indices = reclaimed_data['y_indices']\n targets_one_hot = reclaimed_data['y_onehot']\n alphabet = reclaimed_data['chars']\n sample_input = inputs[0]\n sample_target = targets[0]\n # print sample_input\n # print sample_target\n else:\n inputs,targets, alphabet= get_data(rootdir)\n print \"Generating coded targets...\"\n print \"#########################\"\n targets_as_alphabet_indices = get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet)\n targets_one_hot = get_TIMIT_targets_one_hot(inputs, targets, alphabet)\n\n list_of_alphabets = [key for key in alphabet]\n list_of_alphabets.sort()\n print \"Alphabet list: \", list_of_alphabets\n\n targets_as_alphabet_indices = [[list_of_alphabets.index(char) for char in target] for target in targets]\n print \"Example target and alphabet indices: \"\n print 'target = {} \\nalphabet indices = {}'.format(targets[0], targets_as_alphabet_indices[0])\n\n # prepare file structure to store data\n n_batch = len(inputs)\n max_input_length = max([len(inputs[i]) for i in range(len(inputs))])\n input_dim = len(inputs[0][0])\n X = np.zeros((n_batch, max_input_length, input_dim))\n input_mask = np.zeros((n_batch, max_input_length)) # 1 if there's input data on this row\n\n # read data, store in created structures\n print \"Storing data in X matrix...\"\n for example_id in range(len(inputs)):\n curr_seq_len = len(inputs[example_id])\n X[example_id, :curr_seq_len] = inputs[example_id]\n input_mask[example_id, :curr_seq_len] = 1\n\n print \"example of data read:\"\n sample_input = inputs[0]\n sample_target = targets[0]\n print \"\\t input: \", sample_input\n print \"\\t target: sample_target\"\n\n\n ## TODO: normalize the inputs using mean.\n # From https://github.com/dtjchen/spoken-command-processor/blob/master/model/utils.py\n from sklearn import preprocessing\n def normalize_mean(X):\n scaler = preprocessing.StandardScaler(with_mean=True, with_std=False).fit(X)\n X = scaler.transform(X)\n return X, scaler.mean\n\n\n print \"Normalizing input data using mean...\"\n X, mean = normalize_mean(X)\n print \"Mean of input data:\", mean\n print \"After Normalization: example of data read:\"\n sample_input = inputs[0]\n sample_target = targets[0]\n print \"\\t input: \", sample_input\n print \"\\t target: sample_target\"\n\n\n if (not test):\n out_file_name = savedir + '/TIMIT_data_prepared_for_CTC.pkl'\n print \"Dumping to pickle file\", out_file_name\n with open(out_file_name, 'wb') as f:\n # pickle.dump({'x':inputs, 'y_indices': targets_as_alphabet_indices, 'y_char': targets, 'y_onehot': targets_one_hot, 'chars': list_of_alphabets}, f, protocol=3)\n pickle.dump({'x':X,\n 'inputs': inputs,\n 'mask': input_mask.astype(theano.config.floatX),\n 'y_indices': targets_as_alphabet_indices,\n 'y_char': targets,\n 'y_onehot': targets_one_hot,\n 'chars': list_of_alphabets}, f, protocol=2)\n\n #print 'success!'\n\ndef prepare_TIMIT_for_CLM(dataset='train', savedir = os.getcwd(), test = 0):\n rootdir = os.path.join(TIMIT_fixed_dir, dataset)\n\n if (test):\n ### Read from pkl for faster testing\n in_file_name = savedir + '/TIMIT_data_prepared_for_CTC.pkl'\n with open(in_file_name, 'rb') as f:\n reclaimed_data = pickle.load(f)\n inputs = reclaimed_data['x']\n targets = reclaimed_data['y_char']\n targets_as_alphabet_indices = reclaimed_data['y_indices']\n targets_one_hot = reclaimed_data['y_onehot']\n alphabet = reclaimed_data['chars']\n sample_input = inputs[0]\n 
sample_target = targets[0]\n # print sample_input\n # print sample_target\n else:\n inputs, targets, alphabet = get_data(rootdir)\n\n t = get_TIMIT_targets_one_hot(inputs, targets, alphabet)\n t1 = get_TIMIT_targets_as_alphabet_indices(inputs, targets, alphabet)\n n_batch = len(t)\n max_input_length = max([len(t[i]) for i in range(len(t))]) - 1 #As we predict from one less than the total sequence length\n input_dim = len(t[0][0])\n X = np.zeros((n_batch, max_input_length, input_dim))\n Y = np.zeros((n_batch, max_input_length))\n input_mask = np.zeros((n_batch, max_input_length))\n\n for example_id in range(len(t)):\n curr_seq_len = len(t[example_id][:-1])\n X[example_id, :curr_seq_len] = t[example_id][:-1]\n input_mask[example_id, :curr_seq_len] = 1\n Y[example_id, :curr_seq_len] = t1[example_id][1:]\n\n # inputs = X[:,:-1,:]\n # outputs = Y[:,1:]\n inputs1 = []\n outputs1 = [\n]\n for example_id in range(len(t)):\n # # example_inputs = t[example_id][:-1]\n # # example_outputs = t[example_id][1:]\n # # inputs.append(example_inputs)\n # # outputs.append(example_outputs)\n\n example_inputs1 = t1[example_id][:-1]\n example_outputs1 = t1[example_id][1:]\n inputs1.append(example_inputs1)\n outputs1.append(example_outputs1)\n\n if (not test):\n out_file_name = savedir + '/TIMIT_data_prepared_for_CLM.pkl'\n with open(out_file_name, 'wb') as f:\n # pickle.dump({'x':inputs, 'x_indices':inputs1, 'y': outputs, 'y_indices':outputs1}, f, protocol=3)\n # pickle.dump({'x':inputs.astype(theano.config.floatX), 'mask':input_mask.astype(theano.config.floatX), 'x_indices':inputs1, 'y': outputs, 'y_indices':outputs1}, f, protocol=3)\n pickle.dump({'x':X.astype(theano.config.floatX), 'mask':input_mask.astype(theano.config.floatX), 'y': Y.astype(np.int32), 'x_list': inputs1, 'y_list': outputs1}, f, protocol=2)\n # inputs = [ [ [ t[example][char] ] for char in range(0, len(t[example])-1)] for example in range(len(t))]\n # outputs = [ [ [ t[example][char] ] for char in range(1, len(t[example]))] for example in range(len(t))]\n # return inputs, outputs#, inputs1, outputs1\n\nif __name__=='__main__':\n if len(sys.argv) > 1:\n dataset = str(sys.argv[1])\n else:\n dataset = ''\n savedir = os.getcwd()\n #pdb.set_trace()\n\n from fixWavs import *\n fixWavs(TIMIT_original_dir, TIMIT_fixed_dir)\n\n # now we still need to copy the other files (txt, phn, wrd) to the fixed dir.\n \n prepare_TIMIT_for_CTC(dataset, savedir, test=0)\n\n print(\"\\n\\n##############################\")\n print(\"#### Preparing for CLM... ###\")\n print(\"##############################\")\n\n prepare_TIMIT_for_CLM(dataset, savedir, test=1)\n" }, { "alpha_fraction": 0.6540914177894592, "alphanum_fraction": 0.681721568107605, "avg_line_length": 35.882354736328125, "blob_id": "933273067429317db6411d36b2e51352bb64090f", "content_id": "af585964dfbba8cf892c5c45c04405c507636d1a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1882, "license_type": "permissive", "max_line_length": 94, "num_lines": 51, "path": "/code/Experiments/Tutorials/WildML_LSTM/train.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\n\nimport sys\nimport os\nimport time\nimport numpy as np\nfrom utils import *\nfrom datetime import datetime\nfrom gru_theano import GRUTheano\n\nLEARNING_RATE = float(os.environ.get(\"LEARNING_RATE\", \"0.001\"))\nVOCABULARY_SIZE = int(os.environ.get(\"VOCABULARY_SIZE\", \"2000\"))\nEMBEDDING_DIM = int(os.environ.get(\"EMBEDDING_DIM\", \"48\"))\nHIDDEN_DIM = int(os.environ.get(\"HIDDEN_DIM\", \"128\"))\nNEPOCH = int(os.environ.get(\"NEPOCH\", \"20\"))\nMODEL_OUTPUT_FILE = os.environ.get(\"MODEL_OUTPUT_FILE\")\nINPUT_DATA_FILE = os.environ.get(\"INPUT_DATA_FILE\", \"./data/reddit-comments-2015.csv\")\nPRINT_EVERY = int(os.environ.get(\"PRINT_EVERY\", \"25000\"))\n\nif not MODEL_OUTPUT_FILE:\n ts = datetime.now().strftime(\"%Y-%m-%d-%H-%M\")\n MODEL_OUTPUT_FILE = \"GRU-%s-%s-%s-%s.dat\" % (ts, VOCABULARY_SIZE, EMBEDDING_DIM, HIDDEN_DIM)\n\n# Load data\nx_train, y_train, word_to_index, index_to_word = load_data(INPUT_DATA_FILE, VOCABULARY_SIZE)\n\n# Build model\nmodel = GRUTheano(VOCABULARY_SIZE, hidden_dim=HIDDEN_DIM, bptt_truncate=-1)\n\n# Print SGD step time\nt1 = time.time()\nmodel.sgd_step(x_train[10], y_train[10], LEARNING_RATE)\nt2 = time.time()\nprint \"SGD Step time: %f milliseconds\" % ((t2 - t1) * 1000.)\nsys.stdout.flush()\n\n# We do this every few examples to understand what's going on\ndef sgd_callback(model, num_examples_seen):\n dt = datetime.now().isoformat()\n loss = model.calculate_loss(x_train[:10000], y_train[:10000])\n print(\"\\n%s (%d)\" % (dt, num_examples_seen))\n print(\"--------------------------------------------------\")\n print(\"Loss: %f\" % loss)\n generate_sentences(model, 10, index_to_word, word_to_index)\n save_model_parameters_theano(model, MODEL_OUTPUT_FILE)\n print(\"\\n\")\n sys.stdout.flush()\n\nfor epoch in range(NEPOCH):\n train_with_sgd(model, x_train, y_train, learning_rate=LEARNING_RATE, nepoch=1, decay=0.9, \n callback_every=PRINT_EVERY, callback=sgd_callback)\n\n" }, { "alpha_fraction": 0.755892276763916, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 83.71428680419922, "blob_id": "27087d2ce612bbb022959ad0e4dbe12650d94a75", "content_id": "8215d50441f9c56171bcc7e5a742fcddcf69fc63", "detected_licenses": [ "Apache-2.0", "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 594, "license_type": "permissive", "max_line_length": 190, "num_lines": 7, "path": "/code/Experiments/Lasagne_examples/examples/ResNets/resnet152/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "Link to model pkl file generated by notebook: https://1drv.ms/u/s!Am9OrS5L0lkngiTNdH2rLhiOdiF2\n\nThis repository has code for finetuning or directly using the resnet-152 model pre-trained on Imagenet in theano and lasagne.\n\nThis is based on an awesome notebook doing the same things for resnet-50: https://github.com/Lasagne/Recipes/blob/master/examples/resnet50/ImageNet%20Pretrained%20Network%20(ResNet-50).ipynb\n\nFor licenses and credits please see the [notebook](https://github.com/kundan2510/resnet152-pre-trained-imagenet/blob/master/ImageNet%20Pretrained%20Network%20(ResNet-152).ipynb) \n" }, { "alpha_fraction": 0.6246684193611145, "alphanum_fraction": 0.6392573118209839, "avg_line_length": 29.200000762939453, "blob_id": "4a0dec675add792c1193556b42219cccc44e444c", "content_id": "5fd51bce1e6502522c0823be16e5858cefd605d2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 754, "license_type": "permissive", 
"max_line_length": 90, "num_lines": 25, "path": "/code/audioSR/Preprocessing/visualizeAudio.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom scipy.io import wavfile # get the api\nfrom scipy.fftpack import fft\nfrom pylab import *\n\ndef f(filename):\n fs, data = wavfile.read(filename) # load the data\n print(data.shape)\n a = data#.T[0] # this is a two channel soundtrack, I get the first track\n print(a.size)\n print(\"Normalizing...\")\n b=[(ele/2**8.)*2-1 for ele in a] # this is 16-bit track, b is now normalized on [-1,1)\n print(\"Calculating FFT...\")\n c = fft(b) # create a list of complex number\n d = len(c)/2 # you only need half of the fft list\n print(\"Plotting...\")\n plt.plot(abs(c[:(d-1)]),'r')\n savefig(filename+'.png',bbox_inches='tight')\n\n\nimport glob\nfiles = glob.glob('./*.wav')\nfor ele in files:\n f(ele)\nquit()" }, { "alpha_fraction": 0.6332557797431946, "alphanum_fraction": 0.6427420973777771, "avg_line_length": 41.31007766723633, "blob_id": "7af3651f637bc9c7c76cdae33b7804efc757b05d", "content_id": "6743f057dc07be234f855ee223216b5ecd6df3cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5587, "license_type": "permissive", "max_line_length": 138, "num_lines": 129, "path": "/code/Experiments/Shakespeare/shakespeare.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\r\n\r\nimport numpy as np\r\nimport theano\r\nimport theano.tensor as T\r\nimport lasagne\r\n\r\ntry:\r\n input_text = open(\"shakespeare_input.txt\", \"r\").read()\r\n input_text = input_text.decode(\"utf-8-sig\").encode(\"utf-8\")\r\nexcept Exception as e:\r\n raise IOError(\"Couldn't read input file\")\r\n\r\n#Based on training input, predict what follows:\r\ngeneration_phrase = \"First Citizen:\\nBefore we proceed any further, hear me speak.\"\r\n\r\nvocabulary = list(set(input_text))\r\ninput_size = len(input_text)\r\nvocabulary_size = len(vocabulary)\r\n\r\ncharacter_to_ix = {char:i for i, char in enumerate(vocabulary)}\r\nix_to_character = {i:char for i, char in enumerate(vocabulary)}\r\n\r\nlasagne.random.set_rng(np.random.RandomState(1))\r\n\r\n#Constants. 
Constants everywhere.\r\nSEQUENCE_SIZE = 20\r\nHIDDEN_SIZE = 512 #Amount of units in the two LSTM layers\r\nLEARNING_RATE = 0.01\r\nGRADIENT_CLAMP = 100 #Remove gradients above this number.\r\nPRINT_INTERVAL = 1 #How often to check output.\r\nEPOCHS = 50 #Number of epochs to train network.\r\nBATCH_SIZE = 128\r\n\r\ndef generate_data(p, batch_size=BATCH_SIZE, data=input_text, pass_target=True):\r\n x = np.zeros((batch_size, SEQUENCE_SIZE, vocabulary_size))\r\n y = np.zeros(batch_size)\r\n\r\n for n in range(batch_size):\r\n pointer = n\r\n for i in range(SEQUENCE_SIZE):\r\n x[n, i, character_to_ix[data[p + pointer + i]]] = 1\r\n if pass_target:\r\n y[n] = character_to_ix[data[p + pointer + SEQUENCE_SIZE]]\r\n return x, np.array(y, dtype=\"int32\")\r\n\r\ndef main(epochs=EPOCHS):\r\n print(\"Now building network ...\")\r\n #Build the network, starting at input layer.\r\n #Recurrent layers need input of shape:\r\n #(batch_size, SEQUENCE_SIZE, number of feature)\r\n layer_input = lasagne.layers.InputLayer(shape=(None, None, vocabulary_size))\r\n #Build Long Short Term Memory layer taking \"layer_input\" as first input.\r\n #Clamp the gradient to avoid the problem of exploding gradients.\r\n #Clamping/Clipping is defined by the \"GRADIENT_CLAMP\" ...\r\n layer_forward_01 = lasagne.layers.LSTMLayer(\r\n layer_input, HIDDEN_SIZE, grad_clipping=GRADIENT_CLAMP,\r\n nonlinearity=lasagne.nonlinearities.tanh)\r\n\r\n layer_forward_02 = lasagne.layers.LSTMLayer(\r\n layer_forward_01, HIDDEN_SIZE, grad_clipping=GRADIENT_CLAMP,\r\n nonlinearity=lasagne.nonlinearities.tanh)\r\n #The layer_forward creates output of dimension:\r\n #(batch_size, SEQUENCE_SIZE, HIDDEN_SIZE)\r\n #We care only about the final prediction, so we\r\n #isolate that quantity and feed it to the next layer.\r\n #Output of the sliced layer will be of dimension:\r\n #(batch_size, vocabulary_size)\r\n layer_forward_slice = lasagne.layers.SliceLayer(layer_forward_02, -1, 1)\r\n #The sliced output is parsed through softmax function to create\r\n #probability distribution\r\n layer_output = lasagne.layers.DenseLayer(\r\n layer_forward_slice, num_units=vocabulary_size,\r\n W=lasagne.init.Normal(), nonlinearity=lasagne.nonlinearities.softmax)\r\n target_values = T.ivector(\"target_output\")\r\n network_output = lasagne.layers.get_output(layer_output)\r\n\r\n cost = T.nnet.categorical_crossentropy(network_output, target_values).mean()\r\n #Recieve all parameters from the network.\r\n all_parameters = lasagne.layers.get_all_params(layer_output, trainable=True)\r\n #Compute AdaGrad updates for training.\r\n print(\"Computing updates ...\")\r\n updates = lasagne.updates.adagrad(cost, all_parameters, LEARNING_RATE)\r\n\r\n print(\"Compiling functions ...\")\r\n train = theano.function([layer_input.input_var, target_values], cost, updates=updates, allow_input_downcast=True)\r\n compute_cost = theano.function([layer_input.input_var, target_values], cost, allow_input_downcast=True)\r\n\r\n probs = theano.function([layer_input.input_var], network_output, allow_input_downcast=True)\r\n\r\n def try_stuff(N=200):\r\n assert(len(generation_phrase)>=SEQUENCE_SIZE)\r\n sample_ix = []\r\n x,_ = generate_data(len(generation_phrase) - SEQUENCE_SIZE, 1, generation_phrase,0)\r\n for i in range(N):\r\n # Pick the character that got assigned the highest probability\r\n ix = np.argmax(probs(x).ravel())\r\n # Alternatively, to sample from the distribution instead:\r\n # ix = np.random.choice(np.arange(vocab_size), p=probs(x).ravel())\r\n sample_ix.append(ix)\r\n 
x[:, 0:SEQUENCE_SIZE - 1,:] = x[:, 1:, :]\r\n x[:, SEQUENCE_SIZE - 1,:] = 0\r\n x[0, SEQUENCE_SIZE - 1, sample_ix[-1]] = 1.\r\n\r\n random_snippet = generation_phrase + \"\".join(ix_to_character[ix] for ix in sample_ix)\r\n print(\"----\\n %s \\n----\" % random_snippet)\r\n print(\"Training ...\")\r\n print(\"Seed for generation is: %s\" % generation_phrase)\r\n p = 0\r\n try:\r\n for it in xrange(input_size * epochs / BATCH_SIZE):\r\n try_stuff() #Generate text using p^th character as the start.\r\n\r\n average_cost = 0;\r\n for _ in range(PRINT_INTERVAL):\r\n x, y = generate_data(p)\r\n\r\n p += SEQUENCE_SIZE + BATCH_SIZE - 1\r\n\r\n if(p + BATCH_SIZE + SEQUENCE_SIZE >= input_size):\r\n print(\"Carriage return\")\r\n p = 0\r\n average_cost += train(x, y)\r\n print(\"Epoch {} average loss = {}\".format(it * 1.0 * PRINT_INTERVAL / input_size * BATCH_SIZE, average_cost / PRINT_INTERVAL))\r\n except KeyboardInterrupt:\r\n pass\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" }, { "alpha_fraction": 0.516228437423706, "alphanum_fraction": 0.5326622724533081, "avg_line_length": 26.664772033691406, "blob_id": "33567282df95f38538cbcc18cdb417baba87c30f", "content_id": "9731d01e2dbe0dff5e4aa1604f8b3ab018e474c8", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4868, "license_type": "permissive", "max_line_length": 84, "num_lines": 176, "path": "/code/lipreading/datasetClass.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport os\nimport logging\nimport pickle\n\nimport numpy as np\nfrom theano.compat.six.moves import xrange\n\nfrom pylearn2.datasets import cache, dense_design_matrix\nfrom pylearn2.expr.preprocessing import global_contrast_normalize\nfrom pylearn2.utils import contains_nan\nfrom pylearn2.utils import serial\nfrom pylearn2.utils import string_utils\n\n_logger = logging.getLogger(__name__)\n\n\nclass CIFAR10(dense_design_matrix.DenseDesignMatrix):\n\n \"\"\"\n .. 
todo::\n\n WRITEME\n\n Parameters\n ----------\n which_set : str\n One of 'train', 'test'\n center : WRITEME\n rescale : WRITEME\n gcn : float, optional\n Multiplicative constant to use for global contrast normalization.\n No global contrast normalization is applied, if None\n start : WRITEME\n stop : WRITEME\n axes : WRITEME\n toronto_prepro : WRITEME\n preprocessor : WRITEME\n \"\"\"\n\n def __init__(self, X, y, img_shape, center=False, rescale=False, gcn=None,\n start=None, stop=None, axes=('b', 0, 1, 'c'),\n toronto_prepro = False, preprocessor = None):\n\n self.axes = axes\n\n # we define here:\n dtype = 'uint8'\n\n # we also expose the following details:\n self.img_shape = img_shape\n self.img_size = np.prod(self.img_shape)\n self.n_classes = 39\n self.label_names = [line.rstrip('\\n') for line in open('./phonemeList.txt')]\n\n # save the input data\n # print(\"memory consumption (MB) of given matrix: \", X.nbytes/1000000)\n X = np.cast['float32'](X)\n # print(\"memory consumption (MB) after cast to float: \", X.nbytes/1000000)\n \n if isinstance(y, list):\n y = np.asarray(y).astype(dtype)\n\n # some preprocessing functions\n if center:\n X -= 127.5\n self.center = center\n\n if rescale:\n X /= 127.5\n self.rescale = rescale\n\n if toronto_prepro:\n assert not center\n assert not gcn\n X = X / 255.\n X = X - X.mean(axis=0)\n self.toronto_prepro = toronto_prepro\n\n self.gcn = gcn\n if gcn is not None:\n gcn = float(gcn)\n X = global_contrast_normalize(X, scale=gcn)\n\n view_converter = dense_design_matrix.DefaultViewConverter((120, 120, 1),\n axes)\n\n super(CIFAR10, self).__init__(X=X, y=y, view_converter=view_converter,\n y_labels=self.n_classes)\n\n assert not contains_nan(self.X)\n\n if preprocessor:\n preprocessor.apply(self)\n\n def adjust_for_viewer(self, X):\n \"\"\"\n .. todo::\n WRITEME\n \"\"\"\n # assumes no preprocessing. need to make preprocessors mark the\n # new ranges\n rval = X.copy()\n\n # patch old pkl files\n if not hasattr(self, 'center'):\n self.center = False\n if not hasattr(self, 'rescale'):\n self.rescale = False\n if not hasattr(self, 'gcn'):\n self.gcn = False\n\n if self.gcn is not None:\n rval = X.copy()\n for i in xrange(rval.shape[0]):\n rval[i, :] /= np.abs(rval[i, :]).max()\n return rval\n\n if not self.center:\n rval -= 127.5\n\n if not self.rescale:\n rval /= 127.5\n\n rval = np.clip(rval, -1., 1.)\n\n return rval\n\n def __setstate__(self, state):\n super(CIFAR10, self).__setstate__(state)\n # Patch old pkls\n if self.y is not None and self.y.ndim == 1:\n self.y = self.y.reshape((self.y.shape[0], 1))\n if 'y_labels' not in state:\n self.y_labels = 10\n\n def adjust_to_be_viewed_with(self, X, orig, per_example=False):\n \"\"\"\n .. todo::\n\n WRITEME\n \"\"\"\n # if the scale is set based on the data, display X oring the\n # scale determined by orig\n # assumes no preprocessing. 
need to make preprocessors mark\n # the new ranges\n rval = X.copy()\n\n # patch old pkl files\n if not hasattr(self, 'center'):\n self.center = False\n if not hasattr(self, 'rescale'):\n self.rescale = False\n if not hasattr(self, 'gcn'):\n self.gcn = False\n\n if self.gcn is not None:\n rval = X.copy()\n if per_example:\n for i in xrange(rval.shape[0]):\n rval[i, :] /= np.abs(orig[i, :]).max()\n else:\n rval /= np.abs(orig).max()\n rval = np.clip(rval, -1., 1.)\n return rval\n\n if not self.center:\n rval -= 127.5\n\n if not self.rescale:\n rval /= 127.5\n\n rval = np.clip(rval, -1., 1.)\n\n return rval" }, { "alpha_fraction": 0.6776315569877625, "alphanum_fraction": 0.6888545155525208, "avg_line_length": 43.94782638549805, "blob_id": "c1cfd4d49723db7a5d03d494a1c81819a336b9b1", "content_id": "de69980b3bc4cab8f8c6cb6a21100999cd6ca2df", "detected_licenses": [ "MIT", "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5168, "license_type": "permissive", "max_line_length": 213, "num_lines": 115, "path": "/code/Experiments/Lasagne_examples/examples/ResNets/resnet50/resnet50CaffeToLasagne.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# this gets a caffe network as input, and returns a Lasagne network as output\n\n\n# ResNet-50, network from the paper:\n# \"Deep Residual Learning for Image Recognition\"\n# http://arxiv.org/pdf/1512.03385v1.pdf\n# License: see https://github.com/KaimingHe/deep-residual-networks/blob/master/LICENSE\n\n# Download pretrained weights from:\n# https://s3.amazonaws.com/lasagne/recipes/pretrained/imagenet/resnet50.pkl\n\nimport lasagne\nfrom lasagne.utils import floatX\nfrom lasagne.layers import InputLayer\nfrom lasagne.layers import Conv2DLayer as ConvLayer\nfrom lasagne.layers import BatchNormLayer\nfrom lasagne.layers import Pool2DLayer as PoolLayer\nfrom lasagne.layers import NonlinearityLayer\nfrom lasagne.layers import ElemwiseSumLayer\nfrom lasagne.layers import DenseLayer\nfrom lasagne.nonlinearities import rectify, softmax\n\nimport caffe\nimport numpy as np\nimport pickle\n\nfrom resnet50LasagneModel import * # here the Resnet50 Lasagne model is described\n\n#### Gathering everything together\ndef build_network_fill_from_caffe(): # uses the model structure from build_model, fills the parameters from './ResNet-50-deploy.prototxt', './ResNet-50-model.caffemodel'\n \n # First, create the Lasagne Resnet50. 
Afterward, transfer weights from the caffe model.\n    # Create head of the network (everything before first residual block) in Lasagne\n    net = build_model()\n    print 'Number of Lasagne layers:', len(lasagne.layers.get_all_layers(net['prob']))\n    \n    # # Transfer weights from caffe to lasagne\n    # ## Load pretrained caffe model\n    net_caffe = caffe.Net('./modelFiles/ResNet_TrainedModel/ResNet-50-deploy.prototxt', './modelFiles/ResNet_TrainedModel/ResNet-50-model.caffemodel', caffe.TEST)\n    layers_caffe = dict(zip(list(net_caffe._layer_names), net_caffe.layers))\n    print 'Number of caffe layers: %i' % len(layers_caffe.keys())\n    \n    # ## Copy weights\n    # There is one more issue with the BN layer: caffe stores the variance $\\sigma^2$, but lasagne stores the inverted standard deviation $\\dfrac{1}{\\sigma}$, so we need to make a simple transformation to handle it.\n    # Another issue refers to the weights of the dense layer: in caffe they are transposed, so we should handle that too.\n    \n    for name, layer in net.items():\n        if name not in layers_caffe:\n            print name, type(layer).__name__\n            continue\n        if isinstance(layer, BatchNormLayer):\n            layer_bn_caffe = layers_caffe[name]\n            layer_scale_caffe = layers_caffe['scale' + name[2:]]\n            layer.gamma.set_value(layer_scale_caffe.blobs[0].data)\n            layer.beta.set_value(layer_scale_caffe.blobs[1].data)\n            layer.mean.set_value(layer_bn_caffe.blobs[0].data)\n            layer.inv_std.set_value(1 / np.sqrt(layer_bn_caffe.blobs[1].data) + 1e-4)\n            continue\n        if isinstance(layer, DenseLayer):\n            layer.W.set_value(layers_caffe[name].blobs[0].data.T)\n            layer.b.set_value(layers_caffe[name].blobs[1].data)\n            continue\n        if len(layers_caffe[name].blobs) > 0:\n            layer.W.set_value(layers_caffe[name].blobs[0].data)\n        if len(layers_caffe[name].blobs) > 1:\n            layer.b.set_value(layers_caffe[name].blobs[1].data)\n    \n    # now, a Lasagne network is created and stored in the 'net' variable\n    # the Caffe model is stored in 'net_caffe'\n    \n    return net, net_caffe\n\n\n# These functions are for using the Resnet50 for ImageNet;\n# they are needed for evaluating the model\ndef get_classes (filename='./modelFiles/imagenet_classes.txt'):\n    print(\"getting classes...\")\n    with open(filename, 'r') as f:\n        classes = map(lambda s: s.strip(), f.readlines())\n    return classes\n\ndef get_mean_values (filename='./modelFiles/ResNet_TrainedModel/ResNet_mean.binaryproto'):\n    # Load mean values\n    print(\"loading mean values...\")\n    blob = caffe.proto.caffe_pb2.BlobProto()\n    data = open(filename, 'rb').read()\n    blob.ParseFromString(data)\n    mean_values = np.array(caffe.io.blobproto_to_array(blob))[0]\n    \n    return mean_values\n\n\nif __name__ == \"__main__\":\n    net, net_caffe = build_network_fill_from_caffe() # from resnetCaffeToLasagne.py, builds network and then fills it with the Caffe weights\n    \n    print(\"getting classes...\")\n    classes = get_classes() # put filename here, default = \"./imagenet_classes.txt\"\n    \n    print(\"getting mean_values...\")\n    mean_values = get_mean_values() # put filename here, default = './ResNet_mean.binaryproto'\n    \n    # print(\"testing model...\")\n    # print(\"getting image urls...\") #put url here, default = 'http://www.image-net.org/challenges/LSVRC/2012/ori_urls/indexval.html'. 
Should be a list of image urls to download, open this link for an example\n # image_urls = download_images()\n # print(\"evaluating...\")\n # test_lasagne(classes, image_urls, mean_values, net, net_caffe)\n \n model = {\n 'values': lasagne.layers.get_all_param_values(net['prob']),\n 'synset_words': classes,\n 'mean_image': mean_values\n }\n \n print \"Compared all images. Storing Lasagne model in Pickle (pkl) file...\"\n pickle.dump(model, open('./resnet50imageNet.pkl', 'wb'), protocol=-1)" }, { "alpha_fraction": 0.7795454263687134, "alphanum_fraction": 0.8045454621315002, "avg_line_length": 54, "blob_id": "3d8a60609224fa336e61643ceef3f221ca95c2c1", "content_id": "f810ff3d5dbfa5112e61ce7d6c753a2dfd06f7d6", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 440, "license_type": "permissive", "max_line_length": 129, "num_lines": 8, "path": "/code/Experiments/BinaryNet-master/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# BinaryNet\n\nThis repository enables the reproduction of the experiments described in the article: \n[BinaryNet: Training Deep Neural Networks with Weights and Activations Constrained to +1 or -1.](http://arxiv.org/abs/1602.02830)\n\nIt is divided in two subrepositories:\n* Train-time enables the reproduction of the benchmark results reported in the article\n* Run-time demonstrates the XNOR and baseline GPU kernels described in the article\n" }, { "alpha_fraction": 0.5883455276489258, "alphanum_fraction": 0.5966944694519043, "avg_line_length": 29.73298454284668, "blob_id": "6f2bea9cace5d0d5a9dc19494418de966021d6bc", "content_id": "3742a774cb3366f891827322f9bce033bf0827a1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6013, "license_type": "permissive", "max_line_length": 172, "num_lines": 191, "path": "/code/dataset/TIMIT_crawler/src/crawler.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# coding=utf-8\nimport urllib, urllib2\nimport re\nimport os\nimport datetime\nimport threading\n\ng_mutex=threading.Condition()\ng_pages=[] #从中解析所有url链接\ng_dirs=[] #临时保存所有的dir路径\ng_queueURL=[] #等待爬取的url链接列表\ng_queueData_dir=[]#等待创建的文件夹列表\ng_existdirs=[] #已经爬取过的dir\ng_existURL=[] #已经爬取过的url链接列表\ng_failedURL=[] #下载失败的url链接列表\ng_totalcount=0 #下载过的页面数\nstarttime=0.0\ndef getHtml(url):\n page = urllib.urlopen(url)\n html = page.read()\n return html\nclass Crawler:\n def __init__(self,crawlername,url,threadnum,datadir):\n self.crawlername=crawlername\n self.url=url\n self.threadnum=threadnum\n self.threadpool=[]\n self.datadir = datadir\n self.logfile=file(\"log.txt\",'w')\n def craw(self):\n global g_queueURL\n #add the TIMIT url to the initial g_queueURL list\n g_queueURL.append(self.url)\n g_queueData_dir.append(self.datadir)\n depth=0\n print self.crawlername+\" started...\"\n while(len(g_queueURL)!=0):\n depth+=1\n print 'Searching depth ',depth,'...\\n\\n'\n self.logfile.write(\"URL:\"+g_queueURL[0]+\"........\")\n #download all this dir's file\n self.downloadAll()\n\n #update all subdir path to the queueURL list\n self.updateQueueURL()\n\n content='\\n>>>Depth '+str(depth)+':\\n'\n self.logfile.write(content)\n i=0\n while i<len(g_queueURL):\n content=str(g_totalcount+i)+'->'+g_queueURL[i]+'\\n'\n self.logfile.write(content)\n i+=1\n def downloadAll(self):\n global g_queueURL\n global g_totalcount\n global g_queueData_dir\n i=0\n while i<len(g_queueURL):\n j=0\n 
while j<self.threadnum and i+j < len(g_queueURL):\n g_totalcount+=1\n #download all files in the g_queueURL[i+j]\n threadresult=self.download(g_queueURL[i+j],g_queueData_dir[i+j],j)\n if threadresult!=None:\n print 'Thread started:',i+j,'--File number =',g_totalcount\n j+=1\n i+=j\n for thread in self.threadpool:\n thread.join(10)\n #don't if this it correct\n self.threadpool=[]\n g_queueURL=[]\n g_queueData_dir=[]\n#download all files from url and save to datadir\n def download(self,url,datadir,tid):\n crawthread=CrawlerThread(url,datadir,tid)\n self.threadpool.append(crawthread)\n crawthread.start()\n return 1\n#添加新的URL\n def updateQueueURL(self):\n global g_queueURL\n global g_existURL\n global g_queueData_dir\n global g_existdirs\n global g_pages\n global g_dirs\n newUrlList=[]\n newDirList = []\n #add the new dir path to it\n for url in g_pages:\n newUrlList+=url\n #substract the exist crawed URL\n g_queueURL=list(set(newUrlList)-set(g_existURL))\n for dir_path in g_dirs:\n newDirList += dir_path\n g_queueData_dir = list(set(newDirList) - set(g_existdirs))\n\ndef getUrl(self, content):\n pass\nclass CrawlerThread(threading.Thread):\n def __init__(self,url,datadir,tid):\n threading.Thread.__init__(self)\n global g_mutex\n global g_failedURL\n global g_queueURL\n\n self.url=url\n self.datadir=datadir\n self.tid=tid\n\n self.url_list = []\n self.dir_list = []\n self.file_list = []\n\n html = getHtml(self.url)\n reg = r'<a href=\"([^\\\"]+)\"'\n href_re = re.compile(reg)\n href_list = re.findall(href_re, html)\n href_list=href_list[5:len(href_list)]\n for href_url in href_list:\n if not href_url.startswith(\"?\") and not href_url.endswith(\"2007/\"):\n if href_url.endswith(\"/\"):\n dir_path = self.datadir + os.sep + href_url[0:len(href_url)-1]\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n url = self.url + href_url\n self.url_list.append(url)\n self.dir_list.append(dir_path)\n else:\n self.file_list.append(href_url)\n global g_totalcount\n g_mutex.acquire()\n g_totalcount = g_totalcount + 1\n g_mutex.release()\n g_mutex.acquire()\n g_pages.append(self.url_list)\n g_dirs.append(self.dir_list)\n g_existURL.append(self.url)\n g_existdirs.append(self.datadir)\n g_mutex.release()\n def run(self):\n global g_mutex\n global g_failedURL\n global g_queueURL\n try:\n for href_url in self.file_list:\n data_path = self.datadir + os.sep + href_url\n url = self.url + href_url\n f = urllib2.urlopen(url)\n meta = f.info()\n file_size = int(meta.getheaders(\"Content-Length\")[0])\n file_size_dl = 0\n block_sz = 8192\n file_local = open(data_path, 'wb')\n now_percent_i = 1\n while True:\n buffer = f.read(block_sz)\n if not buffer:\n break\n file_size_dl += len(buffer)\n file_local.write(buffer)\n if file_size_dl / float(file_size) > now_percent_i / 100.0:\n global starttime\n endtime = datetime.datetime.now()\n interval = (endtime - starttime).seconds\n print \"process %d,time collapese:%d s in %s, downloading %s: %f percent\" % (self.tid, interval, data_path, url, (file_size_dl / float(file_size)) * 100)\n now_percent_i = now_percent_i + 1\n file_local.close()\n except Exception,e:\n g_mutex.acquire()\n g_existURL.append(self.url)\n g_failedURL.append(self.url)\n g_mutex.release()\n print 'Failed downloading and saving',self.url\n print e\n return None\n\nif __name__==\"__main__\":\n baseurl= \"http://www.fon.hum.uva.nl/david/ma_ssp/2007/TIMIT/\"\n threadnum= 128\n crawlername=\"TIMIT_Crawler\"\n\n local_data_dir = os.path.abspath('../data')\n\n global starttime\n starttime = 
datetime.datetime.now()\n\n crawler=Crawler(crawlername,baseurl,threadnum,local_data_dir)\n crawler.craw()" }, { "alpha_fraction": 0.5683451890945435, "alphanum_fraction": 0.5725663304328918, "avg_line_length": 35.84739685058594, "blob_id": "892c4a5fe661de5963c61b671fb23470672678f9", "content_id": "46e8c1e46ff911106f947a352c73b5c4eb8adfed", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50223, "license_type": "permissive", "max_line_length": 99, "num_lines": 1363, "path": "/code/Experiments/neon-master/neon/layers/container.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------------------------\n# Copyright 2014-2016 Nervana Systems Inc.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ----------------------------------------------------------------------------\n\nfrom builtins import str, zip, range\nimport numpy as np\nimport itertools as itt\nfrom operator import add\n\nfrom neon import NervanaObject\nfrom neon.layers.layer import Layer, BranchNode, Dropout, DataTransform, LookupTable\nfrom neon.layers.recurrent import Recurrent, get_steps\nfrom neon.util.persist import load_class\nfrom functools import reduce\n\n\ndef flatten(item):\n if hasattr(item, '__iter__'):\n for i in iter(item):\n for j in flatten(i):\n yield j\n else:\n yield item\n\n\nclass DeltasTree(NervanaObject):\n \"\"\"\n Data structure for maintaining nested global delta buffers\n \"\"\"\n def __init__(self, parent=None):\n self.parent = None\n self.child = None\n self.buffers = [None]*2\n self.max_shape = 0\n if parent:\n assert type(parent) is DeltasTree\n self.parent = parent\n\n def decend(self):\n if self.child is None:\n self.child = DeltasTree()\n return self.child\n\n def ascend(self):\n return self.parent\n\n def proc_layer(self, layer):\n in_size = layer.be.shared_iobuf_size(layer.in_shape,\n layer.parallelism)\n if in_size > self.max_shape:\n self.max_shape = in_size\n\n def allocate_buffers(self):\n if self.child:\n self.child.allocate_buffers()\n\n for ind in range(len(self.buffers)):\n if self.buffers[ind] is None:\n if self.max_shape > 0:\n self.buffers[ind] = self.be.iobuf(self.max_shape,\n persist_values=False,\n parallelism=\"Data\")\n\n\nclass LayerContainer(Layer):\n \"\"\"\n Layer containers are a generic class that are used to encapsulate groups of layers and\n provide methods for propagating through the constituent layers, allocating memory.\n \"\"\"\n @property\n def layers_to_optimize(self):\n lto = []\n for l in self.layers:\n if isinstance(l, LayerContainer):\n lto += l.layers_to_optimize\n elif l.has_params:\n if hasattr(l, 'init') and l.init.name == \"Identity\":\n continue\n lto.append(l)\n return lto\n\n @property\n def nest_deltas(self):\n return False\n\n def nested_str(self, level=0):\n \"\"\"\n Utility function for displaying layer info with a given indentation level.\n\n Arguments:\n level (int, optional): indentation level\n\n 
Returns:\n str: layer info at the given indentation level\n \"\"\"\n padstr = '\\n' + ' ' * level\n ss = ' ' * level + self.classnm + padstr\n ss += padstr.join([l.nested_str(level + 1) for l in self.layers])\n return ss\n\n @classmethod\n def gen_class(cls, pdict):\n layers = []\n\n for layer in pdict['layers']:\n typ = layer['type']\n ccls = load_class(typ)\n layers.append(ccls.gen_class(layer['config']))\n\n # the 'layers' key is special in that the layer\n # parameters are in there and need to be saved the\n # whole pdict['layers'] element can not be replaced\n # with the just the layer objects like elsewhere\n lsave = pdict.pop('layers')\n new_cls = cls(layers=layers, **pdict)\n pdict['layers'] = lsave\n return new_cls\n\n def get_description(self, get_weights=False, keep_states=False):\n \"\"\"\n Get layer parameters. All parameters are needed for optimization, but\n only weights are serialized.\n\n Arguments:\n get_weights (bool, optional): Control whether all parameters are returned or\n just weights for serialization.\n keep_states (bool, optional): Control whether all parameters are returned\n or just weights for serialization.\n \"\"\"\n desc = super(LayerContainer, self).get_description(skip=['layers'])\n desc['container'] = True\n desc['config']['layers'] = []\n for layer in self.layers:\n desc['config']['layers'].append(layer.get_description(get_weights=get_weights,\n keep_states=keep_states))\n self._desc = desc\n return desc\n\n def load_weights(self, pdict, load_states=True):\n \"\"\"\n Load weights.\n\n Arguments:\n pdict:\n load_states: (Default value = True)\n\n Returns:\n\n \"\"\"\n assert len(pdict['config']['layers']) == len(self.layers)\n for branch, bdict in zip(self.layers, pdict['config']['layers']):\n branch.load_weights(bdict, load_states=load_states)\n\n def revert_tensors(self):\n for tensor in itt.chain.from_iterable([l.revert_list for l in self.layers]):\n self.be.revert_tensor(tensor)\n\n def propagate_parallelism(self, p):\n for l in self.layers:\n if isinstance(l, LayerContainer):\n l.parallelism = p\n l.propagate_parallelism(p)\n t = l.get_terminal()\n p = t[0].parallelism if isinstance(t, list) else t.parallelism\n else:\n l.parallelism = p if l.parallelism == \"Unknown\" else l.parallelism\n p = l.parallelism\n\n def set_batch_size(self, N):\n \"\"\"\n Set minibatch size.\n\n Arguments:\n N (int): minibatch size\n \"\"\"\n for l in self.layers:\n l.set_batch_size(N)\n\n def set_seq_len(self, S):\n \"\"\"\n Set sequence length.\n\n Arguments:\n S (int): sequence length\n \"\"\"\n for l in self.layers:\n l.set_seq_len(S)\n\n def set_deltas(self, global_deltas):\n \"\"\"\n Set the layer deltas from the shared\n global deltas pool\n \"\"\"\n for l in self.layers:\n l.set_deltas(global_deltas)\n\n def layers_fprop(self):\n \"\"\"\n Generator to iterator over the layers in the same\n order as fprop\n \"\"\"\n for layer in self.layers:\n yield layer\n if hasattr(layer, 'layers_fprop'):\n for layer2 in layer.layers_fprop():\n yield layer2\n\n def layers_bprop(self):\n \"\"\"\n Generator to iterator over the layers in the same\n order as bprop\n \"\"\"\n for layer in reversed(self.layers):\n if hasattr(layer, 'layers_bprop'):\n for layer2 in layer.layers_bprop():\n yield layer2\n yield layer\n\n\nclass Sequential(LayerContainer):\n \"\"\"\n Layer container that encapsulates a simple linear pathway of layers.\n\n Arguments:\n layers (list): List of objects which can be either a list of layers\n (including layer containers).\n \"\"\"\n def __init__(self, layers, 
name=None):\n super(Sequential, self).__init__(name)\n\n self.layers = [l for l in flatten(layers)]\n self._layers = [x for x in self.layers if type(x) not in (BranchNode,)]\n root = self._layers[0]\n assert (root.owns_output or\n type(root) in [Dropout, DataTransform]), \"Sequential root must own outputs\"\n\n def configure(self, in_obj):\n \"\"\"\n Must receive a list of shapes for configuration (one for each pathway)\n the shapes correspond to the layer_container attribute\n\n Arguments:\n in_obj: any object that has an out_shape (Layer) or shape (Tensor, dataset)\n \"\"\"\n if in_obj:\n config_layers = self.layers\n in_obj = in_obj\n else:\n in_obj = self.layers[0]\n\n # Remove the initial branch nodes from the layers\n for l_idx, l in enumerate(self.layers):\n if type(l) in (BranchNode,):\n continue\n else:\n config_layers = self.layers[l_idx:]\n break\n\n super(Sequential, self).configure(in_obj)\n prev_layer = None\n for l in config_layers:\n in_obj = l.configure(in_obj)\n if prev_layer is not None:\n prev_layer.set_next(l)\n prev_layer = l\n self.parallelism = in_obj.parallelism\n self.out_shape = in_obj.out_shape\n return self\n\n def allocate(self, shared_outputs=None):\n \"\"\"\n Allocate output buffer to store activations from fprop.\n\n Arguments:\n shared_outputs (Tensor, optional): pre-allocated tensor for activations to be\n computed into\n \"\"\"\n # get the layers that own their outputs\n alloc_layers = [l for l in self.layers if l.owns_output]\n alloc_layers[-1].allocate(shared_outputs)\n for l in self.layers:\n l.allocate()\n\n def allocate_deltas(self, global_deltas=None):\n if global_deltas is None:\n self.global_deltas = DeltasTree()\n\n st_ind = 0 if getattr(self.layers[0], 'nest_deltas', False) else 1\n for layer in self.layers[st_ind:]:\n layer.allocate_deltas(self.global_deltas)\n\n self.global_deltas.allocate_buffers()\n else:\n self.global_deltas = global_deltas\n\n self.set_deltas(self.global_deltas)\n\n def fprop(self, inputs, inference=False, beta=0.0):\n \"\"\"\n TODO: Handle final layers that don't own their own outputs (bias, activation)\n\n Arguments:\n inputs:\n inference: (Default value = False)\n beta: (Default value = 0.0)\n\n Returns:\n\n \"\"\"\n x = inputs\n\n for l in self.layers:\n altered_tensor = l.be.distribute_data(x, l.parallelism)\n l.revert_list = [altered_tensor] if altered_tensor else []\n\n if l is self.layers[-1] and beta != 0:\n x = l.fprop(x, inference=inference, beta=beta)\n else:\n x = l.fprop(x, inference=inference)\n\n if inference:\n self.revert_tensors()\n\n return x\n\n def bprop(self, error, alpha=1.0, beta=0.0):\n \"\"\"\n Apply the backward pass transformation to the input data.\n\n Arguments:\n error (Tensor): deltas back propagated from the adjacent higher layer\n alpha (float, optional): scale to apply to input for activation\n gradient bprop. Defaults to 1.0\n beta (float, optional): scale to apply to output activation\n gradient bprop. 
Defaults to 0.0\n\n Returns:\n Tensor: deltas to propagate to the adjacent lower layer\n \"\"\"\n for l in reversed(self._layers):\n altered_tensor = l.be.distribute_data(error, l.parallelism)\n if altered_tensor:\n l.revert_list.append(altered_tensor)\n if type(l.prev_layer) is BranchNode or l is self._layers[0]:\n error = l.bprop(error, alpha, beta)\n else:\n error = l.bprop(error)\n\n for tensor in l.revert_list:\n self.be.revert_tensor(tensor)\n return self._layers[0].deltas\n\n def get_terminal(self):\n \"\"\"\n Used for recursively getting final nodes from layer containers.\n \"\"\"\n terminal = self.layers[-1].get_terminal()\n return terminal\n\n\nclass Tree(LayerContainer):\n \"\"\"\n Layer container that encapsulates a simple linear pathway of layers.\n\n Arguments:\n layers (list): List of Sequential containers corresponding to the branches of the Tree.\n The branches must be provided with main trunk first, and then the auxiliary\n branches in the order the branch nodes are encountered\n name (string, optional): Name for the container\n alphas (list(float), optional): list of weighting factors to apply to each branch for\n backpropagating error.\n \"\"\"\n\n def __init__(self, layers, name=None, alphas=None):\n super(Tree, self).__init__(name=name)\n self.layers = []\n for l in layers:\n if isinstance(l, Sequential):\n self.layers.append(l)\n elif isinstance(l, list):\n self.layers.append(Sequential(l))\n elif isinstance(l, Layer):\n self.layers.append(Sequential([l]))\n else:\n ValueError(\"Incompatible element for Tree container\")\n\n self.alphas = [1.0 for _ in self.layers] if alphas is None else alphas\n\n # alphas and betas are used for back propagation\n # We want to ensure that the branches are ordered according to the origin of their roots\n # then the betas will be 0 for the last appearance of the root, and 1 for the rest,\n # but the trunk will always be 1 (since it contains all of the branch nodes)\n self.betas = []\n next_root = None\n for l in reversed(self.layers):\n root = l.layers[0]\n beta = 1.0 if (root is next_root or type(root) is not BranchNode) else 0.0\n next_root = root\n self.betas.append(beta)\n self.betas.reverse()\n\n def nested_str(self, level=0):\n \"\"\"\n Utility function for displaying layer info with a given indentation level.\n\n Arguments:\n level (int, optional): indentation level\n\n Returns:\n str: layer info at the given indentation level\n \"\"\"\n ss = self.classnm + '\\n'\n ss += '\\n'.join([l.nested_str(level + 1) for l in self.layers])\n return ss\n\n def configure(self, in_obj):\n \"\"\"\n Set shape based parameters of this layer given an input tuple, int\n or input layer.\n\n Arguments:\n in_obj (int, tuple, Layer, Tensor or dataset): object that provides shape\n information for layer\n\n Returns:\n (tuple): shape of output data\n \"\"\"\n super(Tree, self).configure(in_obj)\n self.layers[0].configure(in_obj)\n for l in self.layers[1:]:\n l.configure(None)\n self.out_shape = [l.out_shape for l in self.layers]\n return self\n\n def allocate(self, shared_outputs=None):\n \"\"\"\n Allocate output buffer to store activations from fprop.\n\n Arguments:\n shared_outputs (Tensor, optional): pre-allocated tensor for activations to be\n computed into\n \"\"\"\n for l in self.layers:\n l.allocate()\n self.outputs = [l.outputs for l in self.layers]\n\n def allocate_deltas(self, global_deltas=None):\n for l in reversed(self.layers):\n l.allocate_deltas(global_deltas)\n\n def fprop(self, inputs, inference=False):\n \"\"\"\n Apply the forward 
pass transformation to the input data.\n\n Arguments:\n inputs (Tensor): input data\n\n Returns:\n Tensor: output data\n \"\"\"\n x = self.layers[0].fprop(inputs, inference)\n out = [x] + [l.fprop(None, inference=inference) for l in self.layers[1:]]\n return out\n\n def bprop(self, error, alpha=1.0, beta=0.0):\n \"\"\"\n Apply the backward pass transformation to the input data.\n\n Arguments:\n error (Tensor): deltas back propagated from the adjacent higher layer\n\n Returns:\n Tensor: deltas to propagate to the adjacent lower layer\n \"\"\"\n for l, e, a, b in reversed(list(zip(self.layers, error, self.alphas, self.betas))):\n l.bprop(e, alpha=a, beta=b)\n\n def get_terminal(self):\n \"\"\"\n Used for recursively getting final nodes from layer containers.\n \"\"\"\n return [l.get_terminal() for l in self.layers]\n\n\nclass SingleOutputTree(Tree):\n \"\"\"\n Subclass of the Tree container which returns only\n the output of the main branch (branch index 0) during\n inference.\n \"\"\"\n def fprop(self, inputs, inference=False):\n \"\"\"\n Apply the forward pass transformation to the input data.\n\n Arguments:\n inputs (Tensor): input data\n\n Returns:\n Tensor: output data\n \"\"\"\n x = self.layers[0].fprop(inputs, inference)\n if inference:\n return x\n else:\n out = [x] + [l.fprop(None) for l in self.layers[1:]]\n return out\n\n\nclass Broadcast(LayerContainer):\n \"\"\"\n Parent class for MergeSum and MergeBroadcast.\n \"\"\"\n def __init__(self, layers, name=None):\n super(Broadcast, self).__init__(name)\n # Input list of layers converts:\n # lists to Sequential container\n # singleton layers to Sequential containers of 1\n # leaves Sequentials alone\n self.layers = []\n for l in layers:\n if isinstance(l, Sequential):\n self.layers.append(l)\n elif isinstance(l, list):\n self.layers.append(Sequential(l))\n elif isinstance(l, Layer):\n self.layers.append(Sequential([l]))\n else:\n ValueError(\"Incompatible element for \" + self.__class__.__name__ + \" Layer\")\n self.owns_output = True\n self.outputs = None\n\n @property\n def nest_deltas(self):\n return True\n\n def __str__(self):\n ss = '\\n\\t'.join([str(l) for l in self.layers])\n ss = '\\t' + self.classnm + '\\n\\t' + ss\n return ss\n\n def configure(self, in_obj):\n \"\"\"\n Sets shape based parameters of this layer given an input tuple or int\n or input layer\n\n Arguments:\n in_obj (int, tuple, Layer or Tensor or dataset): object that provides shape\n information for layer\n\n Returns:\n (tuple): shape of output data\n \"\"\"\n super(Broadcast, self).configure(in_obj)\n\n # Receiving from single source -- distribute to branches\n for l in self.layers:\n l.configure(in_obj)\n self._configure_merge()\n return self\n\n def allocate_deltas(self, global_deltas):\n nested_deltas = global_deltas.decend()\n for layer in self.layers:\n layer.layers[0].allocate_deltas(global_deltas)\n for sublayer in layer.layers[1:]:\n sublayer.allocate_deltas(nested_deltas)\n\n def set_deltas(self, delta_buffers):\n \"\"\"\n Use pre-allocated (by layer containers) list of buffers for backpropagated error.\n Only set deltas for layers that own their own deltas\n Only allocate space if layer owns its own deltas (e.g., bias and activation work in-place,\n so do not own their deltas).\n\n Arguments:\n delta_buffers (DeltasTree): list of pre-allocated tensors (provided by layer container)\n \"\"\"\n bottom_buffer = delta_buffers.buffers[0]\n\n nested_deltas = delta_buffers.decend()\n assert nested_deltas is not None\n for l in self.layers:\n 
l.layers[0].set_deltas(delta_buffers)\n delta_buffers.buffers.reverse() # undo that last reverse\n for sublayer in l.layers[1:]:\n sublayer.set_deltas(nested_deltas)\n\n # Special case if originating from a branch node\n if type(self.prev_layer) is BranchNode:\n self.deltas = self.be.iobuf(self.in_shape, shared=self.prev_layer.deltas,\n parallelism=self.parallelism)\n else:\n self.deltas = self.be.iobuf(self.in_shape, shared=bottom_buffer,\n parallelism=self.parallelism)\n delta_buffers.buffers.reverse()\n\n def get_terminal(self):\n \"\"\"\n Used for recursively getting final nodes from layer containers.\n \"\"\"\n terminals = [l.get_terminal() for l in self.layers]\n return terminals\n\n\nclass MergeSum(Broadcast):\n \"\"\"\n \"\"\"\n\n def allocate(self, shared_outputs=None):\n \"\"\"\n Allocate output buffer to store activations from fprop.\n\n Arguments:\n shared_outputs (Tensor, optional): pre-allocated tensor for activations to be\n computed into\n \"\"\"\n if self.outputs is None:\n self.outputs = self.be.iobuf(self.out_shape, shared=shared_outputs,\n parallelism=self.parallelism)\n for l in self.layers:\n l.allocate(shared_outputs=self.outputs)\n\n def _configure_merge(self):\n \"\"\"\n Helper function for configuring output shape\n \"\"\"\n out_shapes = [l.out_shape for l in self.layers]\n self.out_shape = out_shapes[0]\n\n def fprop(self, inputs, inference=False):\n \"\"\"\n Apply the forward pass transformation to the input data.\n\n Arguments:\n inputs (Tensor): input data\n\n Returns:\n Tensor: output data\n \"\"\"\n for l in self.layers:\n beta = 0 if l is self.layers[0] else 1\n l.fprop(inputs, inference, beta=beta)\n return self.outputs\n\n def bprop(self, error, alpha=1.0, beta=0.0):\n \"\"\"\n Apply the backward pass transformation to the input data.\n\n Arguments:\n error (Tensor): deltas back propagated from the adjacent higher layer\n alpha (float, optional): scale to apply to input for activation\n gradient bprop. Defaults to 1.0\n beta (float, optional): scale to apply to output activation\n gradient bprop. Defaults to 0.0\n\n Returns:\n Tensor: deltas to propagate to the adjacent lower layer\n \"\"\"\n for l in reversed(self.layers):\n b = beta if l is self.layers[-1] else 1\n l.bprop(error, alpha=alpha, beta=b)\n return self.deltas\n\n\nclass MergeBroadcast(Broadcast):\n \"\"\"\n Branches a single incoming layer or object (broadcast) into multiple output paths that are\n then combined again (merged). This container supports several options for concatenating the\n paths (\"recurrent\", \"depth\", and \"stack\").\n\n \"recurrent\" is used when merging two recurrent output streams.\n\n \"depth\" concatenates activations that have a notion of spatial dimension. Multiple\n activations can be concatenated along the feature map dimension, but the feature map\n shapes have to be the same.\n\n \"stack\" ignores the feature map shape and simply stacks the non-batch dimensions\n atop each other. Used to concatenate the output of fully connected layers with each\n other, and fully connected layers with convolutional layers.\n\n For example, suppose we are merging a conv layer with output shape (10, 5, 5)\n and a fully connected layer with 100 output nodes. 
Using 'depth' is not allowable.\n By using 'stack', the (10, 5, 5) output of the conv layer would just be interpreted as\n 250 output nodes that are stacked on top of the 100 nodes from the fully connected\n layer to get a total merged output of 350 nodes.\n\n Arguments:\n layers (list(list(Layer), LayerContainer): list of either layer lists,\n or layer containers. Elements that are\n lists will be wrapped in Sequential\n containers\n merge (string): the merging method. Must be 'recurrent', 'depth', or 'stack'\n alphas (list(float), optional): list of alpha values by which to weight the\n backpropagated errors\n name (str): Container name. Defaults to \"MergeBroadcast\"\n \"\"\"\n def __init__(self, layers, merge, alphas=None, name=None):\n super(MergeBroadcast, self).__init__(layers, name)\n\n self.betas = [1.0 for _ in self.layers]\n self.betas[-1] = 0.0\n self.alphas = [1.0 for _ in self.layers] if alphas is None else alphas\n\n self.merge = merge # How this MergeBroadcast gets merged\n assert self.merge in (\"recurrent\", \"depth\", \"stack\")\n self.error_views = None\n\n def get_partitions(self, x, slices):\n \"\"\"\n Given a partitioning, slices, of an activation buffer, x, determine which axis to slice\n along depending on whether x is a sequential tensor or not.\n\n Arguments:\n x:\n slices:\n\n Returns:\n\n \"\"\"\n if x.shape[-1] != self.be.bsz: # This is the sequential case\n return [x[:, sl] for sl in slices]\n else:\n return [x[sl] for sl in slices]\n\n def allocate(self, shared_outputs=None):\n \"\"\"\n Allocate output buffer to store activations from fprop.\n\n Arguments:\n shared_outputs (Tensor, optional): pre-allocated tensor for activations to be\n computed into\n \"\"\"\n if self.outputs is None:\n self.outputs = self.be.iobuf(self.out_shape, shared=shared_outputs,\n parallelism=self.parallelism)\n self.output_views = self.get_partitions(self.outputs, self.slices)\n for l, out_view in zip(self.layers, self.output_views):\n l.allocate(shared_outputs=out_view)\n\n def _configure_merge(self):\n \"\"\"\n Helper function for configuring shapes depending on the merge concatenation type\n \"\"\"\n in_shapes = [l.out_shape for l in self.layers]\n # Figure out how to merge\n if self.merge == \"recurrent\":\n catdims = [xs[1] for xs in in_shapes]\n self.out_shape = (in_shapes[0][0], sum(catdims))\n stride_size = self.be.bsz\n elif self.merge == \"depth\":\n catdims = [xs[0] for xs in in_shapes]\n self.out_shape = (sum(catdims),) + in_shapes[0][1:]\n stride_size = np.prod(in_shapes[0][1:])\n elif self.merge == \"stack\":\n catdims = [xs if isinstance(xs, int) else np.prod(xs) for xs in in_shapes]\n self.out_shape = sum(catdims)\n stride_size = 1\n end_idx = [idx * stride_size for idx in np.cumsum(catdims)]\n start_idx = [0] + end_idx[:-1]\n self.slices = [slice(s, e) for s, e in zip(start_idx, end_idx)]\n\n def fprop(self, inputs, inference=False):\n \"\"\"\n Apply the forward pass transformation to the input data.\n\n Arguments:\n inputs (Tensor): input data\n\n Returns:\n Tensor: output data\n \"\"\"\n for l in self.layers:\n l.fprop(inputs, inference)\n return self.outputs\n\n def bprop(self, error, alpha=1.0, beta=0.0):\n \"\"\"\n Apply the backward pass transformation to the input data.\n\n Arguments:\n error (Tensor): deltas back propagated from the adjacent higher layer\n alpha (float, optional): scale to apply to input for activation\n gradient bprop. Defaults to 1.0\n beta (float, optional): scale to apply to output activation\n gradient bprop. 
Defaults to 0.0\n\n Returns:\n Tensor: deltas to propagate to the adjacent lower layer\n \"\"\"\n self.betas[-1] = beta\n if self.error_views is None:\n self.error_views = self.get_partitions(error, self.slices)\n for l, e, a, b in reversed(list(zip(self.layers, self.error_views, self.alphas,\n self.betas))):\n l.bprop(e, alpha=a * alpha, beta=b)\n return self.deltas\n\n\nclass MergeMultistream(MergeBroadcast):\n \"\"\"\n Merging multiple input sources via concatenation. This container is similar to MergeBroadcast\n except that it receives different streams of input directly from a dataset.\n \"\"\"\n def __init__(self, layers, merge, name=None):\n super(MergeMultistream, self).__init__(layers, merge=merge, name=name)\n\n @property\n def nest_deltas(self):\n return False\n\n def configure(self, in_obj):\n \"\"\"\n Must receive a list of shapes for configuration (one for each pathway)\n the shapes correspond to the layer_container attribute\n\n Arguments:\n in_obj (list(Tensor)): list of Data tensors provided to each sequential container\n \"\"\"\n self.prev_layer = None\n if not isinstance(in_obj, list):\n assert hasattr(in_obj, 'shape') and isinstance(in_obj.shape, list)\n in_obj = in_obj.shape\n assert isinstance(in_obj, list), \"Multistream inputs must be interpretable as shapes\"\n for inp, l in zip(in_obj, self.layers):\n l.configure(inp)\n self._configure_merge()\n return self\n\n def set_deltas(self, delta_buffers):\n \"\"\"\n Use pre-allocated (by layer containers) list of buffers for backpropagated error.\n Only set deltas for layers that own their own deltas\n Only allocate space if layer owns its own deltas (e.g., bias and activation work in-place,\n so do not own their deltas).\n\n Arguments:\n delta_buffers (list): list of pre-allocated tensors (provided by layer container)\n \"\"\"\n # delta_buffers ignored here, will generate\n # new delta buffers for each sequential container\n for l in self.layers:\n l.allocate_deltas()\n\n def fprop(self, inputs, inference=False):\n \"\"\"\n Apply the forward pass transformation to the input data.\n\n Arguments:\n inputs (Tensor): input data\n\n Returns:\n Tensor: output data\n \"\"\"\n for l, inp in zip(self.layers, inputs):\n l.fprop(inp, inference)\n return self.outputs\n\n def bprop(self, error, alpha=1.0, beta=0.0):\n \"\"\"\n Apply the backward pass transformation to the input data.\n\n Arguments:\n error (Tensor): deltas back propagated from the adjacent higher layer\n alpha (float, optional): scale to apply to input for activation\n gradient bprop. Defaults to 1.0\n beta (float, optional): scale to apply to output activation\n gradient bprop. Defaults to 0.0\n\n Returns:\n Tensor: deltas to propagate to the adjacent lower layer\n \"\"\"\n if self.error_views is None:\n self.error_views = self.get_partitions(error, self.slices)\n for l, e in zip(self.layers, self.error_views):\n l.bprop(e)\n\n\nclass Encoder(Sequential):\n \"\"\"\n Encoder stack for the Seq2Seq container. 
Acts like a sequential\n except for bprop which are connected as specified to Decoder recurrent layers\n \"\"\"\n def __init__(self, layers, name=None):\n super(Encoder, self).__init__(layers, name)\n # list of recurrent layers only:\n self._recurrent = [l for l in self.layers if isinstance(l, Recurrent)]\n self.connections = None\n self.error_buf = None\n self.error_slices = None\n\n def allocate_deltas(self, global_deltas=None):\n super(Encoder, self).allocate_deltas(global_deltas=global_deltas)\n\n self.error_buf = self.be.iobuf(self.out_shape)\n self.error_slices = get_steps(self.error_buf, self.out_shape)\n\n def set_connections(self, decoder_cons):\n \"\"\"\n Based on decoder connections, create the list of which layers encoder are\n connected to.\n \"\"\"\n cons = []\n for ii in range(len(self._recurrent)):\n l_list = [i_dec for i_dec, i_enc in enumerate(decoder_cons) if i_enc == ii]\n cons.append(l_list)\n self.connections = cons\n\n def get_final_states(self, decoder_cons):\n \"\"\"\n Based on decoder connections, prepare the list of final states for decoder\n \"\"\"\n final_states = [self._recurrent[ii].final_state()\n if ii is not None else None\n for ii in decoder_cons]\n\n return final_states\n\n def bprop(self, hidden_error_list, inference=False, alpha=1.0, beta=0.0):\n \"\"\"\n Arguments:\n hidden_error_list: Decoder container bprop output. List of errors\n associated with decoder recurrent layers.\n \"\"\"\n i_enc = len(self._recurrent) - 1 # index into recurrent layers, in reverse order\n\n # initialize error to zeros (shape of last encoder layer output)\n error = self.error_buf\n error.fill(0)\n\n # bprop through layers, setting up connections from decoder layers for recurrent layers\n for l in reversed(self._layers):\n altered_tensor = l.be.distribute_data(error, l.parallelism)\n if altered_tensor:\n l.revert_list.append(altered_tensor)\n\n # add the hidden error by the hidden error list\n if isinstance(l, Recurrent):\n for i_dec in self.connections[i_enc]:\n self.error_slices[-1][:] = self.error_slices[-1] + hidden_error_list[i_dec]\n i_enc -= 1\n\n # normal bprop through the layers\n if type(l.prev_layer) is BranchNode or l is self._layers[0]:\n error = l.bprop(error, alpha, beta)\n else:\n error = l.bprop(error)\n\n for tensor in l.revert_list:\n self.be.revert_tensor(tensor)\n\n\nclass Decoder(Sequential):\n \"\"\"\n Decoder stack for the Seq2Seq container. 
Acts like a sequential\n except for fprop which takes the additional init_state_list, and bprop\n which takes additional hidden_delta\n \"\"\"\n def __init__(self, layers, name=None):\n super(Decoder, self).__init__(layers, name)\n # list of recurrent layers only:\n self._recurrent = [l for l in self.layers if isinstance(l, Recurrent)]\n self.connections = None\n self.full_steps = None\n\n def fprop(self, x, inference=False, init_state_list=None):\n\n if init_state_list is None:\n init_state_list = [None for _ in range(len(self._recurrent))]\n\n ii = 0 # index into init_state_list (decoder recurrent layer number)\n for l in self.layers:\n altered_tensor = l.be.distribute_data(x, l.parallelism)\n l.revert_list = [altered_tensor] if altered_tensor else []\n\n # special fprop for recurrent layers with init state\n if isinstance(l, Recurrent):\n x = l.fprop(x, inference=inference, init_state=init_state_list[ii])\n ii = ii + 1\n else:\n x = l.fprop(x, inference=inference)\n\n return x\n\n def set_connections(self, decoder_cons):\n self.connections = decoder_cons\n\n def bprop(self, error, inference=False, alpha=1.0, beta=0.0):\n \"\"\"\n bprop through layers, saving hidden_error for Recurrent layers\n \"\"\"\n hidden_error_list = []\n for l in reversed(self.layers):\n altered_tensor = l.be.distribute_data(error, l.parallelism)\n if altered_tensor:\n l.revert_list.append(altered_tensor)\n\n error = l.bprop(error)\n if isinstance(l, Recurrent):\n hidden_error_list.append(l.get_final_hidden_error())\n\n for tensor in l.revert_list:\n self.be.revert_tensor(tensor)\n\n # return hidden error in order of decoder layers\n # (to match decoder_connections)\n hidden_error_list.reverse()\n\n return hidden_error_list\n\n def switch_mode(self, inference):\n \"\"\"\n Dynamically grow or shrink the number of time steps to perform\n single time step fprop during inference.\n \"\"\"\n # set up parameters\n hasLUT = isinstance(self.layers[0], LookupTable)\n\n # sequence length is different dimension depending on whether there is LUT\n cur_steps = self.in_shape[0] if hasLUT else self.in_shape[1]\n if not inference:\n old_size = cur_steps\n # assumes encoder and decoder have the same sequence length\n new_size = self.full_steps\n else:\n old_size = cur_steps\n new_size = 1\n\n # resize buffers\n if old_size != new_size:\n if hasLUT:\n in_obj = (new_size, 1)\n self.layers[0].inputs = None # ensure \"allocate\" will reallocate this buffer\n self.layers[0].outputs_t = None\n else:\n in_obj = (self.out_shape[0], new_size)\n self.configure(in_obj=in_obj)\n # set layer outputs to None so they get reallocated\n for l in self.layers:\n if l.owns_output:\n l.outputs = None\n self.allocate(shared_outputs=None) # re-allocate deltas, but not weights\n for l in self.layers:\n l.name += \"'\"\n\n\nclass Seq2Seq(LayerContainer):\n \"\"\"\n Layer container that encapsulates encoder decoder pathways\n used for sequence to sequence models.\n\n Arguments:\n layers (list): Length two list specifying the encoder and decoder.\n The encoder must be provided as the first list element.\n List elements may be an Encoder and a Decoder container, or,\n similar to Tree and Broadcast containers, encoder (decoder) can be\n specified as a list of layers or a single layer, which are\n converted to Encoder and Decoder containers.\n decoder_connections (list of ints): for every recurrent decoder layer, specifies the\n corresponding encoder layer index (recurrent layers only)\n to get initial state from. 
The format will be, e.g.\n [0, 1, None].\n If not given, the container will try to make a\n one-to-one connections, which assumes an equal number\n of encoder and decoder recurrent layers.\n \"\"\"\n def __init__(self, layers, decoder_connections=None, name=None):\n\n assert len(layers) == 2, self.__class__.__name__ + \" layers argument must be length 2 list\"\n\n super(Seq2Seq, self).__init__(name=name)\n\n def get_container(l, cls):\n if isinstance(l, cls):\n return l\n elif isinstance(l, list):\n return cls(l)\n elif isinstance(l, Layer):\n return cls([l])\n else:\n ValueError(\"Incompatible element for \" + self.__class__.__name__ + \" container\")\n\n self.encoder = get_container(layers[0], Encoder)\n self.decoder = get_container(layers[1], Decoder)\n self.layers = self.encoder.layers + self.decoder.layers\n\n self.hasLUT = isinstance(self.encoder.layers[0], LookupTable)\n\n if decoder_connections:\n self.decoder_connections = decoder_connections\n else:\n # if decoder_connections not given, assume one to one connections between\n # an equal number of encoder and decoder recurrent layers\n assert len(self.encoder._recurrent) == len(self.decoder._recurrent)\n self.decoder_connections = np.arange(len(self.encoder._recurrent)).tolist()\n\n self.encoder.set_connections(self.decoder_connections)\n self.decoder.set_connections(self.decoder_connections)\n\n @classmethod\n def gen_class(cls, pdict):\n layers = [[], []]\n for i, layer in enumerate(pdict['layers']):\n typ = layer['type']\n ccls = load_class(typ)\n\n if i < pdict['num_encoder_layers']:\n layers[0].append(ccls.gen_class(layer['config']))\n else:\n layers[1].append(ccls.gen_class(layer['config']))\n\n # layers is special in that there may be parameters\n # serialized which will be used elsewhere\n lsave = pdict.pop('layers')\n pdict.pop('num_encoder_layers', None)\n new_cls = cls(layers=layers, **pdict)\n pdict['layers'] = lsave\n return new_cls\n\n def get_description(self, get_weights=False, keep_states=False):\n \"\"\"\n Get layer parameters. 
All parameters are needed for optimization, but\n only weights are serialized.\n\n Arguments:\n get_weights (bool, optional): Control whether all parameters are returned or\n just weights for serialization.\n keep_states (bool, optional): Control whether all parameters are returned\n or just weights for serialization.\n \"\"\"\n desc = super(Seq2Seq, self).get_description(get_weights=get_weights,\n keep_states=keep_states)\n\n desc['config']['num_encoder_layers'] = len(self.encoder.layers)\n desc['config']['decoder_connections'] = self.decoder_connections\n self._desc = desc\n return desc\n\n def configure(self, in_obj):\n # assumes Seq2Seq will always get dataset as in_obj\n self.encoder.configure(in_obj.shape)\n self.decoder.configure(in_obj.decoder_shape)\n\n self.parallelism = self.decoder.parallelism\n self.out_shape = self.decoder.out_shape\n self.in_shape = self.decoder.layers[-1].in_shape if self.hasLUT else self.decoder.in_shape\n # save full sequence length for switching between inference and non-inference modes\n self.decoder.full_steps = self.in_shape[1]\n return self\n\n def allocate(self, shared_outputs=None):\n self.decoder.allocate(shared_outputs)\n if any([l.owns_output for l in self.decoder.layers]):\n self.encoder.allocate()\n else:\n self.encoder.allocate(shared_outputs)\n # buffer for collecting time loop outputs\n self.xbuf = self.be.iobuf(self.out_shape)\n\n def allocate_deltas(self, global_deltas=None):\n self.encoder.allocate_deltas(global_deltas)\n self.decoder.allocate_deltas(global_deltas)\n\n def fprop(self, inputs, inference=False, beta=0.0):\n \"\"\"\n Forward propagation for sequence to sequence container. Calls\n fprop for the Encoder container followed by fprop for the Decoder\n container. If inference is True, the Decoder will be called with\n individual time steps in a for loop.\n \"\"\"\n # make sure we are in the correct decoder mode\n self.decoder.switch_mode(inference)\n\n if not inference:\n # load data\n (x, z) = inputs\n\n # fprop through Encoder layers\n x = self.encoder.fprop(x, inference=inference, beta=0.0)\n\n # get encoder hidden state\n init_state_list = self.encoder.get_final_states(self.decoder_connections)\n\n # fprop through Decoder layers\n x = self.decoder.fprop(z, inference=inference, init_state_list=init_state_list)\n else: # Loopy inference\n\n # prep data\n x = inputs\n new_steps = 1\n if self.hasLUT:\n z_shape = new_steps\n else:\n z_shape = (self.out_shape[0], new_steps)\n z = x.backend.iobuf(z_shape)\n\n # encoder\n x = self.encoder.fprop(x, inference=inference, beta=0.0)\n\n # get encoder hidden state\n init_state_list = self.encoder.get_final_states(self.decoder_connections)\n\n # decoder\n steps = self.in_shape[1]\n if self.hasLUT:\n z_argmax = x.backend.zeros((1, z.shape[0]*z.shape[1]))\n\n for t in range(steps):\n z = self.decoder.fprop(z, inference=inference, init_state_list=init_state_list)\n\n # transfer hidden state from DECODER to next step\n init_state_list = [recurrent.final_state()\n for recurrent in self.decoder._recurrent]\n\n # and write to output buffer\n self.xbuf[:, t*self.be.bsz:(t+1)*self.be.bsz] = z\n\n # handle input to LUT\n if self.hasLUT:\n z_argmax[:] = self.be.argmax(z, axis=0)\n z = z_argmax\n\n x = self.xbuf\n\n if inference:\n self.revert_tensors()\n\n return x\n\n def bprop(self, error, inference=False, alpha=1.0, beta=0.0):\n \"\"\"\n Backpropagation for sequence to sequence container. 
Calls Decoder container\n bprop followed by Encoder container bprop.\n \"\"\"\n\n hidden_error_list = self.decoder.bprop(error)\n self.encoder.bprop(hidden_error_list)\n\n return self.encoder.layers[0].deltas\n\n\nclass Multicost(NervanaObject):\n \"\"\"\n Class used to compute cost from a Tree container with multiple outputs.\n The number of costs must match the number of outputs. Costs will be applied to the outputs\n in the same order that they occur in the Tree.\n\n The targets used for the cost can either be provided from the dataset as a list or tuple,\n one for each cost, or, if only a single target is provided, the same target is used for all\n costs. This is useful for providing multiple cost branches computing the same error at\n different stages of the network as in GoogLeNet.\n \"\"\"\n\n def __init__(self, costs, weights=None, name=None):\n super(Multicost, self).__init__(name)\n self.costs = costs\n self.weights = [1.0 for c in costs] if weights is None else weights\n self.deltas = None\n self.inputs = None\n self.costfunc = costs[0].costfunc # For displaying during callbacks\n\n def initialize(self, in_obj):\n \"\"\"\n Determine dimensions of cost and error buffers and allocate space from the input layer\n\n Arguments:\n in_obj (Layer): input layer from which to calculate costs\n \"\"\"\n if isinstance(in_obj, LayerContainer):\n terminals = in_obj.get_terminal()\n elif isinstance(in_obj, list):\n terminals = in_obj\n else:\n raise RuntimeError(\"Multicost must be passed a container or list\")\n\n for c, ll in zip(self.costs, terminals):\n c.initialize(ll)\n\n @property\n def cost(self):\n \"\"\" Get cost. \"\"\"\n return self.costs[0].cost\n\n @property\n def outputs(self):\n \"\"\" Get outputs. \"\"\"\n return self.costs[0].outputs\n\n def get_description(self, **kwargs):\n \"\"\"\n Get layer parameters.\n\n Arguments:\n **kwargs: ignored\n \"\"\"\n desc = super(Multicost, self).get_description()\n costs = desc['config'].pop('costs')\n desc['config']['costs'] = []\n for cost in costs:\n desc['config']['costs'].append(cost.get_description())\n self._desc = desc\n return desc\n\n def get_cost(self, inputs, targets):\n \"\"\"\n Compute the cost function over a list of inputs and targets.\n\n Arguments:\n inputs (list(Tensor)): list of Tensors containing input values to be compared to\n targets\n targets (Tensor, list(Tensor)): either a list of Tensors containing target values, or\n a single target Tensor that will be mapped to each\n input\n\n Returns:\n Tensor containing cost\n \"\"\"\n if not isinstance(inputs, list):\n return self.costs[0].get_cost(inputs, targets)\n else:\n ltargets = targets if type(targets) in (tuple, list) else [targets for c in self.costs]\n costvals = [c.get_cost(i, t) for c, i, t in zip(self.costs, inputs, ltargets)]\n sum_optree = reduce(add, [w * c for w, c in zip(self.weights, costvals)])\n costvals[0][:] = sum_optree\n return costvals[0]\n\n def get_errors(self, inputs, targets):\n \"\"\"\n Get a list of errors for backpropagating to a Tree container that has multiple output\n nodes.\n\n Arguments:\n inputs (list(Tensor)): list of Tensors containing input values to be compared to\n targets\n targets (Tensor, list(Tensor)): either a list of Tensors containing target values, or\n a single target Tensor that will be mapped to each\n input\n Returns:\n list of Tensors containing errors for each input\n \"\"\"\n l_targets = targets if type(targets) in (tuple, list) else [targets for c in self.costs]\n for cost, i, t, we in zip(self.costs, inputs, 
l_targets, self.weights):\n cost.get_errors(i, t)\n if isinstance(cost.deltas, list):\n for delta in cost.deltas:\n delta[:] *= we\n else:\n cost.deltas[:] *= we\n\n if self.deltas is None:\n self.deltas = [c.deltas for c in self.costs]\n\n return self.deltas\n" }, { "alpha_fraction": 0.6883720755577087, "alphanum_fraction": 0.6930232644081116, "avg_line_length": 19.428571701049805, "blob_id": "eb5a2e76ba6e514504c19bfe0637695240509e0b", "content_id": "33ff43ea81a78d55819a2fe0e04a8468be3cff00", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 430, "license_type": "permissive", "max_line_length": 98, "num_lines": 21, "path": "/code/lipreading/test.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport sys\nimport os\nimport time\n\nimport numpy as np\n\ndef unpickle(file):\n import cPickle\n fo = open(file, 'rb')\n dict = cPickle.load(fo)\n fo.close()\n return dict\n\ndata = unpickle(os.path.join(os.path.expanduser('~/TCDTIMIT/database_binaryViseme/Lipspkr1.pkl')))\nprint(data.keys())\nprint(data)\n\nthisN = data['data'].shape[0]\nprint(\"This dataset contains \", thisN, \" images\")\n\n" }, { "alpha_fraction": 0.6554765105247498, "alphanum_fraction": 0.6890469193458557, "avg_line_length": 35.24742126464844, "blob_id": "7c1ce30654b8f92462e0fb5855f393d2ea6528fa", "content_id": "bae27664ab59d8892bbfb9c3a6c7935c2179cc1a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3515, "license_type": "permissive", "max_line_length": 100, "num_lines": 97, "path": "/code/Experiments/Tutorials/nn-from-scratch/StanfordNN_GPU.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sklearn\nimport sklearn.datasets\nimport theano\nimport theano.tensor as T\nimport time\n\n# Use float32 as the default float data type\ntheano.config.floatX = 'float32'\n\n# Generate a dataset\nnp.random.seed(0)\ntrain_X, train_y = sklearn.datasets.make_moons(5000, noise=0.20)\ntrain_y_onehot = np.eye(2)[train_y]\n\n# Size definitions\nnum_examples = len(train_X) # training set size\nnn_input_dim = 2 # input layer dimensionality\nnn_output_dim = 2 # output layer dimensionality\nnn_hdim = 1000 # hiden layer dimensionality\n\n# Gradient descent parameters (I picked these by hand)\nepsilon = np.float32(0.01) # learning rate for gradient descent\nreg_lambda = np.float32(0.01) # regularization strength\n\n# GPU NOTE: Conversion to float32 to store them on the GPU!\nX = theano.shared(train_X.astype('float32')) # initialized on the GPU\ny = theano.shared(train_y_onehot.astype('float32'))\n\n# GPU NOTE: Conversion to float32 to store them on the GPU!\nW1 = theano.shared(np.random.randn(nn_input_dim, nn_hdim).astype('float32'), name='W1')\nb1 = theano.shared(np.zeros(nn_hdim).astype('float32'), name='b1')\nW2 = theano.shared(np.random.randn(nn_hdim, nn_output_dim).astype('float32'), name='W2')\nb2 = theano.shared(np.zeros(nn_output_dim).astype('float32'), name='b2')\n\n# Forward propagation\nz1 = X.dot(W1) + b1\na1 = T.tanh(z1)\nz2 = a1.dot(W2) + b2\ny_hat = T.nnet.softmax(z2)\n\n# The regularization term (optional)\nloss_reg = 1. 
/ num_examples * reg_lambda / 2 * (T.sum(T.sqr(W1)) + T.sum(T.sqr(W2)))\n# the loss function we want to optimize\nloss = T.nnet.categorical_crossentropy(y_hat, y).mean() + loss_reg\n# Returns a class prediction\nprediction = T.argmax(y_hat, axis=1)\n\n# Gradients\ndW2 = T.grad(loss, W2)\ndb2 = T.grad(loss, b2)\ndW1 = T.grad(loss, W1)\ndb1 = T.grad(loss, b1)\n\n# Note that we removed the input values because we will always use the same shared variable\n# GPU NOTE: Removed the input values to avoid copying data to the GPU.\nforward_prop = theano.function([], y_hat)\ncalculate_loss = theano.function([], loss)\npredict = theano.function([], prediction)\n\n# GPU NOTE: Removed the input values to avoid copying data to the GPU.\ngradient_step = theano.function(\n [],\n # profile=True,\n updates=((W2, W2 - epsilon * dW2),\n (W1, W1 - epsilon * dW1),\n (b2, b2 - epsilon * db2),\n (b1, b1 - epsilon * db1)))\n\n\ndef build_model(num_passes=20000, print_loss=False):\n # Re-Initialize the parameters to random values. We need to learn these.\n np.random.seed(0)\n # GPU NOTE: Conversion to float32 to store them on the GPU!\n W1.set_value((np.random.randn(nn_input_dim, nn_hdim) / np.sqrt(nn_input_dim)).astype('float32'))\n b1.set_value(np.zeros(nn_hdim).astype('float32'))\n W2.set_value((np.random.randn(nn_hdim, nn_output_dim) / np.sqrt(nn_hdim)).astype('float32'))\n b2.set_value(np.zeros(nn_output_dim).astype('float32'))\n\n # Gradient descent. For each batch...\n for i in xrange(0, num_passes):\n # This will update our parameters W2, b2, W1 and b1!\n gradient_step()\n\n # Optionally print the loss.\n # This is expensive because it uses the whole dataset, so we don't want to do it too often.\n if print_loss and i % 1000 == 0:\n print \"Loss after iteration %i: %f\" % (i, calculate_loss())\n\n# Profiling\n# theano.config.profile = True\n# theano.config.profile_memory = True\n# gradient_step()\n# theano.printing.debugprint(gradient_step)\n# print gradient_step.profile.summary()\n\ngradient_step()" }, { "alpha_fraction": 0.5386111736297607, "alphanum_fraction": 0.5491149425506592, "avg_line_length": 34.70833206176758, "blob_id": "85ab1b60e38f2c3fba632c735d4a27b903bf93b1", "content_id": "675c007042a876a2bd212139fe80e008455fb805", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5141, "license_type": "permissive", "max_line_length": 95, "num_lines": 144, "path": "/code/lipreading/preprocessImage.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "#### help functions\nfrom __future__ import print_function\n\n# remove without complaining\nimport os, errno\nimport subprocess\nimport getopt\nimport traceback\nimport zipfile, os.path\nimport concurrent.futures\nimport threading\nimport time\nimport shutil\nimport sys\nimport glob\nfrom os import listdir\nfrom os.path import isfile, join\n\n\nimport numpy as np\nimport scipy.io as sio\nimport dlib\nfrom skimage import io\nfrom skimage import data\nfrom skimage.transform import resize\nfrom skimage.color import rgb2gray\nfrom skimage import img_as_ubyte\n\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"Preprocessing image\")\nadd_arg = parser.add_argument\nadd_arg(\"-i\", \"--input_image\", help=\"Input image\")\nargs = parser.parse_args()\n\n\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(\"./shape_predictor_68_face_landmarks.dat\")\n \ndef detectMouth(imagePath):\n dets = []\n fname, ext = 
os.path.splitext(os.path.basename(imagePath))\n f = imagePath\n if ext == \".jpg\":\n try:\n # print(f)\n facePath = \"testImages\" + os.sep + fname + \"_face.jpg\"\n mouthPath = \"testImages\" + os.sep + fname + \"_mouth.jpg\"\n \n img = io.imread(f, as_grey=True)\n width, height = img.shape[:2]\n \n # detect face, then keypoints. Store face and mouth\n # resize with factor 4 to increase detection speed\n resizer = 4\n dim = (int(width / resizer), int(height / resizer))\n imgSmall = resize(img, dim)\n imgSmall = img_as_ubyte(imgSmall)\n \n dets = detector(imgSmall, 1) # detect face\n if len(dets) == 0:\n # print(\"looking on full-res image...\")\n resizer = 1\n dim = (int(width / resizer), int(height / resizer))\n imgSmall = resize(img, dim)\n imgSmall = img_as_ubyte(imgSmall)\n \n dets = detector(imgSmall, 1)\n if len(dets) == 0:\n print(f)\n print(\"still no faces found. Using previous face coordinates...\")\n if 'top' in locals(): # could be issue if no face in first image ? #TODO\n face_img = img[top:bot, left:right]\n io.imsave(facePath, face_img)\n mouth_img = img[my:my + mh, mx:mx + mw]\n io.imsave(mouthPath, mouth_img)\n else:\n print(\"top not in locals. ERROR\")\n \n d = dets[0]\n # extract face, store in storeFacesDir\n left = d.left() * resizer\n right = d.right() * resizer\n top = d.top() * resizer\n bot = d.bottom() * resizer\n # go no further than img borders\n if (left < 0): left = 0\n if (right > width): right = width\n if (top < 0): top = 0\n if (bot > height): bot = height\n face_img = img[top:bot, left:right]\n io.imsave(facePath, face_img) # save face image\n \n # now detect mouth landmarks\n # detect 68 keypoints, see dlibLandmarks.png\n shape = predictor(imgSmall, d)\n # Get the mouth landmarks.\n mx = shape.part(48).x * resizer\n mw = shape.part(54).x * resizer - mx\n my = shape.part(31).y * resizer\n mh = shape.part(57).y * resizer - my\n # go no further than img borders\n if (mx < 0): mx = 0\n if (mw > width): mw = width\n if (my < 0): my = 0\n if (mh > height): mh = height\n \n # scale them to get a better image of the mouth\n widthScalar = 1.5\n heightScalar = 1\n mx = int(mx - (widthScalar - 1) / 2.0 * mw)\n # my = int(my - (heightScalar - 1)/2.0*mh) #not needed, we already have enough nose\n mw = int(mw * widthScalar)\n mh = int(mh * widthScalar)\n \n mouth_img = img[my:my + mh, mx:mx + mw]\n io.imsave(mouthPath, mouth_img)\n return mouthPath\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n print(traceback.format_exc())\n return -1\n \n \ndef resize_image (filePath, filePathResized, keepAR=False, width=120.0):\n im = io.imread(filePath)\n if keepAR: #Aspect Ratio\n r = width / im.shape[1]\n dim = (int(im.shape[0] * r), int(width))\n im_resized = resize(im, dim)\n else:\n im_resized = resize(im, (120, 120))\n io.imsave(filePathResized, im_resized)\n\ndef convertToGrayscale(oldFilePath, newFilePath):\n img_gray = rgb2gray(io.imread(oldFilePath))\n io.imsave(newFilePath, img_gray) # don't write to disk if already exists\n return newFilePath\n \nif __name__ == \"__main__\":\n print(\"Compiling functions...\")\n mouthPath = detectMouth(args.input_image) # expects npz model\n grayMouthPath = convertToGrayscale(mouthPath, mouthPath)\n resize_image(grayMouthPath, grayMouthPath)" }, { "alpha_fraction": 0.4389341175556183, "alphanum_fraction": 0.4478164315223694, "avg_line_length": 33.20252990722656, "blob_id": "9f68ce85584de14ef5cc7632a266c3fbc85ccb50", "content_id": "1f13d7def34b9c444cbedcfd14862b42686e6f79", "detected_licenses": [ "Apache-2.0", 
"LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 2702, "license_type": "permissive", "max_line_length": 112, "num_lines": 79, "path": "/code/audioSR/HTK/htk/HLMTools/Makefile", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# ----------------------------------------------------------- \n# \n# ___ \n# |_| | |_/ SPEECH \n# | | | | \\ RECOGNITION \n# ========= SOFTWARE \n# \n# \n# ----------------------------------------------------------- \n# Copyright: Cambridge University\n# 1995-2005 Engineering Department\n# http://htk.eng.cam.ac.uk\n# http://mi.eng.cam.ac.uk\n# \n# Use of this software is governed by a License Agreement \n# ** See the file License for the Conditions of Use ** \n# ** This banner notice must not be removed ** \n# \n# ----------------------------------------------------------- \n# File: HLMTools/Makefile. Generated from Makefile.in by configure.\n# ----------------------------------------------------------- \n\nSHELL =\t/bin/sh\ninc = \t../HTKLib\nsrcdir = .\ntop_srcdir = ..\n\nprefix = /usr/local\nexec_prefix = ${prefix}\nbindir = ${exec_prefix}/bin\nsbindir = ${exec_prefix}/sbin\nlibexecdir = ${exec_prefix}/libexec\ndatadir = ${prefix}/share\nsysconfdir = ${prefix}/etc\nsharedstatedir = ${prefix}/com\nlocalstatedir = ${prefix}/var\nlibdir = ${exec_prefix}/lib\ninfodir = ${prefix}/share/info\nmandir = ${prefix}/share/man\nincludedir = ${prefix}/include\noldincludedir = /usr/include\nhlib = \t../HTKLib\nllib = \t../HLMLib\nHLIBS = \t$(hlib)/HTKLib.a $(llib)/HLMLib.a\nCC = \tgcc\nCFLAGS = \t-m32 -ansi -D_SVID_SOURCE -DOSS_AUDIO -D'ARCH=\"x86_64\"' -Wall -Wno-switch -g -O2 -I$(hlib) -I$(llib) \nLDFLAGS = \t-L/usr/X11R6/lib $(HLIBS) -lm\nINSTALL = \t/usr/bin/install -c\nPROGS =\tCluster HLMCopy LAdapt LBuild LFoF \\\n\t\tLGCopy LGList LGPrep LLink LMerge \\\n\t\tLNewMap LNorm LPlex LSubset \n\nall: $(PROGS)\n\n# build $(PROGS)\n%: %.c $(HLIBS)\n\tif [ ! -d $(bindir) -a X_ = X_yes ] ; then mkdir -p $(bindir) ; fi\n\t$(CC) -o $@ $(CFLAGS) $^ $(LDFLAGS)\n\tif [ X_ = X_yes ] ; then $(INSTALL) -m 755 $@ $(bindir) ; fi\n\nstrip: $(PROGS)\n\t-strip $(PROGS)\n\nclean:\n\t-rm -f *.o \n\ncleanup:\n\t-rm -f *.o $(PROGS) *.exe\n\ndistclean:\n\t-rm -f *.o $(PROGS) Makefile *.exe\n\ninstall: mkinstalldir $(PROGS)\n\tfor program in $(PROGS) ; do $(INSTALL) -m 755 $${program} $(bindir) ; done\n\nmkinstalldir:\n if [ ! 
-d $(bindir) -a X_ = X_yes ] ; then mkdir -p $(bindir) ; fi\n\n.PHONY: all strip clean cleanup distclean install mkinstalldir\n" }, { "alpha_fraction": 0.6498906016349792, "alphanum_fraction": 0.667396068572998, "avg_line_length": 25.941177368164062, "blob_id": "74c169bcf86e71f3ab657444f2fb0fd529232edc", "content_id": "61bab220046a119a6db34cd4d473fedc0d8f9c03", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 457, "license_type": "permissive", "max_line_length": 63, "num_lines": 17, "path": "/code/Experiments/Tutorials/nn-from-scratch/matplotTest.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\nplt.close(\"all\")\n# Compute the x and y coordinates for points on a sine curve\nx = np.arange(-2*np.pi, 3 * np.pi, 0.1)\ny=[]\nfor i in range(0,10):\n y.append(np.tanh(x+i-5))\n\n# Plot the points using matplotlib\nfor i in range(len(y)):\n plt.plot(x, y[i])\n plt.legend(str(i))\nplt.title(\"this is a test\")\nplt.plot(x,np.sin(x),label=\"sine\")\nplt.show() # You must call plt.show() to make graphics appear." }, { "alpha_fraction": 0.5837677121162415, "alphanum_fraction": 0.6028791069984436, "avg_line_length": 28.9769229888916, "blob_id": "3077f49cbff9aba3269f85daff3fe733b9295133", "content_id": "0c0f01b7fa76952593d9cae0f6dc342f1e87db0b", "detected_licenses": [ "Apache-2.0", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4029, "license_type": "permissive", "max_line_length": 98, "num_lines": 130, "path": "/code/Experiments/BinaryNet-master/Run-time/binary_kernels.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\r\nimport time\r\n\r\nimport numpy as np\r\n\r\nimport pycuda.driver as cuda\r\nimport pycuda.autoinit\r\nfrom pycuda.compiler import SourceModule\r\n \r\ndef gemm(gemm_kernel,A,B,C,A_rows,A_cols,B_cols):\r\n\r\n # dimensions\r\n assert A_cols%16 == 0 # Block size\r\n\r\n # Launching GEMM GPU kernel \r\n block_size = 16\r\n block = (block_size,block_size,1)\r\n grid = (B_cols / block_size+1, A_rows / block_size+1) # better too many blocks than too little\r\n gemm_kernel(A,B,C, np.intc(A_rows), np.intc(A_cols), np.intc(B_cols), block= block, grid=grid)\r\n \r\ndef concatenation_rows(concatenate_rows_kernel,A,A_conc,A_rows,A_cols):\r\n \r\n assert A_cols%32 == 0 # concatenation step\r\n \r\n block_size = 64 \r\n block = (block_size,1,1)\r\n grid = (A_rows*A_cols/(block_size*32)+1,1)\r\n concatenate_rows_kernel(A,A_conc, np.intc(A_rows*A_cols/32), block= block, grid=grid)\r\n \r\ndef concatenation_cols(concatenate_cols_kernel,A,A_conc,A_rows,A_cols):\r\n \r\n assert A_rows%32 == 0 # concatenation step\r\n \r\n block_size = 64 \r\n block = (block_size,1,1)\r\n grid = (A_cols/block_size+1,1)\r\n concatenate_cols_kernel(A,A_conc, np.intc(A_rows), np.intc(A_cols), block= block, grid=grid)\r\n \r\n \r\ndef sign(x):\r\n return np.float32(2.*np.greater_equal(x,0)-1.)\r\n \r\nif __name__ == \"__main__\": \r\n \r\n context = pycuda.autoinit.context\r\n \r\n print \"Building the kernels...\"\r\n \r\n mod = SourceModule(open(\"binary_kernels.cu\").read())\r\n gemm_kernel = mod.get_function(\"gemm\")\r\n concatenate_rows_kernel = mod.get_function(\"concatenate_rows_kernel\")\r\n concatenate_cols_kernel = mod.get_function(\"concatenate_cols_kernel\")\r\n xnor_gemm_kernel = mod.get_function(\"xnor_gemm\")\r\n \r\n print \"Loading matrices to device...\"\r\n \r\n # Matrices 
dimensions\r\n N = 8192\r\n A_rows = N\r\n A_cols = N\r\n B_cols = N\r\n # A_rows = 784\r\n # A_cols = 1024\r\n # B_cols = 4096\r\n \r\n # A is a matrix randomly filled with 1 and -1\r\n A = sign(np.random.randn(A_rows,A_cols))\r\n A = A.astype(np.float32)\r\n A_gpu = cuda.mem_alloc(A.nbytes)\r\n cuda.memcpy_htod(A_gpu, A)\r\n \r\n # B is a matrix randomly filled with 1 and -1\r\n B = sign(np.random.randn(A_cols,B_cols))\r\n B = B.astype(np.float32)\r\n B_gpu = cuda.mem_alloc(B.nbytes)\r\n cuda.memcpy_htod(B_gpu, B)\r\n \r\n # C is the resulting matrix\r\n C1 = np.zeros((A_rows,B_cols)).astype(np.float32)\r\n C2 = np.zeros((A_rows,B_cols)).astype(np.float32)\r\n C_gpu = cuda.mem_alloc(C1.nbytes)\r\n \r\n print \"XNOR kernel...\"\r\n \r\n # wait until the GPU is done with the work\r\n context.synchronize()\r\n # kernel timing\r\n start_time = time.time()\r\n \r\n # concatenate A\r\n A_conc = cuda.mem_alloc(A.nbytes/32)\r\n concatenation_rows(concatenate_rows_kernel,A_gpu,A_conc,A_rows,A_cols)\r\n # concatenate B\r\n B_conc = cuda.mem_alloc(B.nbytes/32)\r\n concatenation_cols(concatenate_cols_kernel,B_gpu,B_conc,A_cols,B_cols)\r\n # XNOR GEMM\r\n gemm(xnor_gemm_kernel,A_conc,B_conc,C_gpu,A_rows,A_cols/32,B_cols)\r\n # Free concatenated memory\r\n A_conc.free()\r\n B_conc.free()\r\n \r\n # wait until the GPU is done with the work\r\n context.synchronize()\r\n # kernel timing\r\n execution_time = time.time() - start_time\r\n print(\" execution_time = \"+str(execution_time)+\"s\")\r\n \r\n # get the result\r\n cuda.memcpy_dtoh(C2,C_gpu)\r\n \r\n print \"Baseline kernel...\"\r\n \r\n # wait until the GPU is done with the work\r\n context.synchronize()\r\n # kernel timing\r\n start_time = time.time()\r\n\r\n gemm(gemm_kernel,A_gpu,B_gpu,C_gpu,A_rows,A_cols,B_cols)\r\n \r\n # wait until the GPU is done with the work\r\n context.synchronize()\r\n # kernel timing\r\n execution_time = time.time() - start_time\r\n print(\" execution_time = \"+str(execution_time)+\"s\")\r\n \r\n # get the result\r\n cuda.memcpy_dtoh(C1,C_gpu)\r\n \r\n print \"Comparing the results...\"\r\n \r\n print \" np.allclose(C1, C2) = \" + str(np.allclose(C1, C2))\r\n" }, { "alpha_fraction": 0.6204704642295837, "alphanum_fraction": 0.6230133771896362, "avg_line_length": 24.387096405029297, "blob_id": "110d3fe2c883c61441e9af1d89c353229bf48347", "content_id": "9372d1d9a594609ba75389ff25d717aa7ab7667d", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1573, "license_type": "permissive", "max_line_length": 68, "num_lines": 62, "path": "/code/audioSR/Spoken-language-identification-master/theano/networks/base_network.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import cPickle as pickle\n\n\nclass BaseNetwork:\n\t\n\tdef say_name(self):\n\t\treturn \"unknown\"\n\t\n\t\n\tdef save_params(self, file_name, epoch, **kwargs):\n\t\twith open(file_name, 'w') as save_file:\n\t\t\tpickle.dump(\n\t\t\t\tobj = {\n\t\t\t\t\t'params' : [x.get_value() for x in self.params],\n\t\t\t\t\t'epoch' : epoch, \n\t\t\t\t},\n\t\t\t\tfile = save_file,\n\t\t\t\tprotocol = -1\n\t\t\t)\n\t\n\t\n\tdef load_state(self, file_name):\n\t\tprint \"==> loading state %s\" % file_name\n\t\tepoch = 0\n\t\twith open(file_name, 'r') as load_file:\n\t\t\tdict = pickle.load(load_file)\n\t\t\tloaded_params = dict['params']\n\t\t\tfor (x, y) in zip(self.params, loaded_params):\n\t\t\t\tx.set_value(y)\n\t\t\tepoch = dict['epoch']\n\t\treturn epoch\n\n\n\tdef 
get_batches_per_epoch(self, mode):\n\t\tif (mode == 'train' or mode == 'predict_on_train'):\n\t\t\treturn len(self.train_list_raw) / self.batch_size\n\t\telif (mode == 'test' or mode == 'predict'):\n\t\t\treturn len(self.test_list_raw) / self.batch_size\n\t\telse:\n\t\t\traise Exception(\"unknown mode\")\n\t\n\t\n\tdef step(self, batch_index, mode):\n\t\t\n\t\tif (mode == \"train\"):\n\t\t\tdata, answers = self.read_batch(self.train_list_raw, batch_index)\n\t\t\ttheano_fn = self.train_fn\n\t\telif (mode == \"test\" or mode == \"predict\"):\n\t\t\tdata, answers = self.read_batch(self.test_list_raw, batch_index)\n\t\t\ttheano_fn = self.test_fn\n\t\telif (mode == \"predict_on_train\"):\n\t\t\tdata, answers = self.read_batch(self.train_list_raw, batch_index)\n\t\t\ttheano_fn = self.test_fn\n\t\telse:\n\t\t\traise Exception(\"unrecognized mode\")\n\t\t\n\t\tret = theano_fn(data, answers)\n\t\treturn {\"prediction\": ret[0],\n\t\t\t\t\"answers\": answers,\n\t\t\t\t\"current_loss\": ret[1],\n\t\t\t\t\"log\": \"\",\n\t\t\t\t}" }, { "alpha_fraction": 0.7359550595283508, "alphanum_fraction": 0.7528089880943298, "avg_line_length": 16.799999237060547, "blob_id": "fa821cdb669df2a1638bfa78acd4ebceb1367bec", "content_id": "62ac0fe270f7b6fe40be87eccf7d8a87fc296c64", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 178, "license_type": "permissive", "max_line_length": 68, "num_lines": 10, "path": "/code/dataset/TIMIT_crawler/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "# TIMIT_Crawler\nA crawler which could download the data of the TIMIT speech database\n\n### User Guide:\n\n1.Clone the project into your computer.\n\n2.Run the crawler.py\n\n3.All Done!\n" }, { "alpha_fraction": 0.818493127822876, "alphanum_fraction": 0.818493127822876, "avg_line_length": 57.599998474121094, "blob_id": "fb8d382ed5e67d2679998db726840b4f09973547", "content_id": "cc596b243946a0cff940d48f3273849531fea053", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 292, "license_type": "permissive", "max_line_length": 101, "num_lines": 5, "path": "/README.md", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "For the database downloading, preprocessing etc: see https://github.com/matthijsvk/TCDTIMITprocessing\n\nFor the lipreading aspect, see the folder 'code/lipreading'\nFor the auditive speech recognition aspect, see 'code/audioSR'\nFor the combination of lipreading and audio, see 'code/combinedSR'" }, { "alpha_fraction": 0.6359125375747681, "alphanum_fraction": 0.6478588581085205, "avg_line_length": 40.219696044921875, "blob_id": "0773a5eda86a6f263d0cd7fe929e722d3185dd3e", "content_id": "b59d6cdf8cf3e2232411ce958987ffdb9889790a", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5441, "license_type": "permissive", "max_line_length": 150, "num_lines": 132, "path": "/code/audioSR/Preprocessing/prepareWAV_HTK.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import os\nimport string\nimport sys\nimport exceptions\nimport errno\n\n# Search a dir for wav files, generate files with paths so HTK can process them\n\n# search recursively through a directory, collecting paths of all wav files.\n# Then a .scp file (just a txt really) is written, containing the wav path, a space, and the mfc path.\n# This mfc path is the destination that the HTK toolkit 
will write the output MFC file to.\n# so it will look like this:\n# wavPath_file1/ mfcPath_file1/\n# wavPath_file2/ mfcPath_file2/\n# etc...\n# After executing this file, run 'HCopy -A -D -T 1 -C wav_config -S wavAndMFCCPaths.scp' in the directory where the .scp file is stored\n# after the mfc files are generated, I manually copied them back to the data folder, so they are stored together with the wav and label files\n\n# If that gives an error (eg \"Input file is not in RIFF format\"), you can run fixWavs.py, using the output files from this script as input to fixWavs.\n\n# see http://www.voxforge.org/home/dev/acousticmodels/linux/create/htkjulius/tutorial/data-prep/step-5\n\ndef getWavMFCCLocations(baseDir):\n dirs = []\n wavs = []\n wavAndMFCC = []\n for root, directories, filenames in os.walk(baseDir):\n for directory in directories:\n # delete empty directories\n dirPath = os.path.join(root,directory)\n try:\n os.rmdir(dirPath)\n except OSError as ex:\n if ex.errno == errno.ENOTEMPTY:\n dirs.append(dirPath)\n for file in filenames:\n path = os.path.join(root, file)\n # delete empty files (or almost empty, eg 44 bytes)\n if os.stat(path).st_size <= 44:\n os.remove(path)\n if os.path.splitext(path)[1].lower() == '.wav':\n wavs.append(path)\n\n # change store dir of MFCC file of this WAV\n thisDir = os.path.dirname(path)\n relPath = os.path.relpath(thisDir, baseDir)\n newPath = baseDir + os.sep + \"mfc\" + os.sep + relPath\n # Create directory structure if needed\n if not os.path.exists(newPath):\n os.makedirs(newPath)\n pathMFCC = newPath + os.sep + os.path.splitext(os.path.basename(path))[0] + \".mfc\" #change extension\n\n wavAndMFCC.append(path + \" \" + pathMFCC)\n\n wavs.sort(key=string.lower)\n dirs.sort(key=string.lower)\n wavAndMFCC.sort(key=string.lower)\n return dirs, wavs, wavAndMFCC\n\n\n\ndef prepareWAV_HTK(baseDir = os.path.expanduser('~/TCDTIMIT/TIMIT/TIMIT'), fileStoreDir = os.path.expanduser('~/TCDTIMIT/TIMIT/TIMIT') ):\n from helpFunctions import *\n\n print \"Searching for WAVs in: \", baseDir\n\n dirs, wavs, wavAndMFCC = getWavMFCCLocations(baseDir)\n print \"Dirs: \", dirs[0:3]\n print \"Wavs: \", wavs[0:3]\n print \"Wav + MFCC: \", wavAndMFCC[0:3]\n print \"Number of files: \", len(wavs)\n\n # Write the files\n print \"Writing the files...\"\n\n wavFilePath = fileStoreDir + os.sep + 'wavPaths.txt'\n writeToTxt(wavs, wavFilePath)\n\n wavAndMFCCFilePath = fileStoreDir + os.sep + 'wavAndMFCCPaths.scp'\n writeToTxt(wavAndMFCC, wavAndMFCCFilePath)\n\n # write wav_config file\n # from the HTK manual, p31: In brief, they specify that the target parameters are to be MFCC using C 0 as the energy\n # component, the frame period is 10msec (HTK uses units of 100ns), the output should be saved in\n # compressed format, and a crc checksum should be added. The FFT should use a Hamming window\n # and the signal should have first order preemphasis applied using a coefficient of 0.97. The filterbank\n # should have 26 channels and 12 MFCC coefficients should be output. The variable ENORMALISE is\n # by default true and performs energy normalisation on recorded audio files. 
It cannot be used with\n # live audio and since the target system is for live audio, this variable should be set to false.\n wav_config = [\n \"SOURCEFORMAT = WAV\",\n \"TARGETKIND = MFCC_0_D\",\n \"TARGETRATE = 100000.0\",\n \"SAVECOMPRESSED = T\",\n \"SAVEWITHCRC = T\",\n \"WINDOWSIZE = 250000.0\",\n \"USEHAMMING = T\",\n \"PREEMCOEF = 0.97\",\n \"NUMCHANS = 26\",\n \"CEPLIFTER = 22\",\n \"NUMCEPS = 12\"\n ]\n wavConfigPath = fileStoreDir + os.sep + 'wav_config'\n writeToTxt(wav_config, wavConfigPath)\n\n print \"Done.\"\n print \"List of wavs has been written to: \", wavFilePath\n print \"List of wavs + MFCC has been written to: \", wavAndMFCCFilePath\n print \"Wav_config for HTK has been written to: \", wavConfigPath\n print \"Now run 'HCopy -A -D -T 1 -C wav_config -S wavAndMFCCPaths.scp' in the directory where the .scp file is stored\"\n return 0\n\n\nif __name__ == '__main__':\n # SPECIFY Default SOURCE FOLDER\n # TIMIT:\n baseDir = os.path.expanduser('~/TCDTIMIT/TIMIT/TIMIT')\n # TCDTIMIT:\n # baseDir = os.path.expanduser('/media/matthijs/TOSHIBA_EXT/TCDTIMIT')\n # TIMIT 2, maybe incomplete:\n # TIMIT: #'~/TCDTIMIT/TIMITaudio/wav') #\n\n # MFC files will be stored in 'mfc' folder on the same level as the 'wav' folder\n fileStoreDir = baseDir # os.path.dirname(baseDir) # store one level above.\n\n nbArgs = len(sys.argv)\n if (nbArgs ==1):\n prepareWAV_HTK(sys.argv[1])\n elif (nbArgs==2):\n prepareWAV_HTK(sys.argv[1], sys.argv[2])\n else:\n print \"ERROR, too many arguments\"\n" }, { "alpha_fraction": 0.5611566305160522, "alphanum_fraction": 0.5693449378013611, "avg_line_length": 38.09000015258789, "blob_id": "f3d0090225a9740fe259530a5646e69b16e24067", "content_id": "62f8e39a566b60e8cbe88edd3ea2711c882bea24", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3908, "license_type": "permissive", "max_line_length": 122, "num_lines": 100, "path": "/code/lipreading/evaluateNetwork.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "from __future__ import print_function\n\nimport time\nfrom collections import OrderedDict\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nimport pickle\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\nfrom theano.scalar.basic import UnaryScalarOp, same_out_nocomplex\nfrom theano.tensor.elemwise import Elemwise\n\nfrom lipreadingTCDTIMIT import *\n\ndef load_model (model_npz_file):\n if not os.path.exists(model_npz_file): print(\n \"This npz file does not exist! 
Please run 'lipreadingTCDTIMIT' first to generate it.\")\n\n    alpha = .1\n    print(\"alpha = \" + str(alpha))\n    epsilon = 1e-4\n    print(\"epsilon = \" + str(epsilon))\n\n    # activation\n    activation = T.nnet.relu\n    print(\"activation = T.nnet.relu\")\n    input = T.tensor4('inputs')\n    target = T.matrix('targets')\n    cnn = buildNetworks.build_network_resnet50(input)\n\n    with np.load('./results/ResNet50/allLipspeakers/allLipspeakers.npz') as f:\n        param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n\n    lasagne.layers.set_all_param_values(cnn['prob'], param_values)\n    return cnn\n\ndef getPhonemeToVisemeMap():\n    map = {'f':'A','v':'A',\n        'er':'B','ow':'B','r':'B','q':'B','w':'B','uh':'B','uw':'B','axr':'B','ux':'B',\n        'b':'C','p':'C','m':'C','em':'C',\n        'aw':'D',\n        'dh':'E','th':'E',\n        'ch':'F','jh':'F','sh':'F','zh':'F',\n        'oy':'G', 'ao':'G',\n        's':'H', 'z':'H',\n        'aa':'I','ae':'I','ah':'I','ay':'I','ey':'I','ih':'I','iy':'I','y':'I','eh':'I','ax-h':'I','ax':'I','ix':'I',\n        'd':'J','l':'J','n':'J','t':'J','el':'J','nx':'J','en':'J','dx':'J',\n        'g':'K','k':'K','ng':'K','eng':'K',\n        'sil':'S','pcl':'S','tcl':'S','kcl':'S','bcl':'S','dcl':'S','gcl':'S','h#':'S','#h':'S','pau':'S','epi':'S'\n    }\n    return map\n\ndef getPhonemeNumberMap (phonemeMap=\"./phonemeLabelConversion.txt\"):\n    phonemeNumberMap = {}\n    with open(phonemeMap) as inf:\n        for line in inf:\n            parts = line.split() # split line into parts\n            if len(parts) > 1: # if at least 2 parts/columns\n                phonemeNumberMap[str(parts[0])] = parts[1] # part0= frame, part1 = phoneme\n                phonemeNumberMap[str(parts[1])] = parts[0]\n    return phonemeNumberMap\n\ndef evaluateNetwork (X, y, model_npz_file):\n    \n    phonemeToViseme = getPhonemeToVisemeMap()\n    phonemeNumberMap = getPhonemeNumberMap() #bidirectional map phoneme-number\n    for i in range(len(y)):\n        y[i] = phonemeToViseme[phonemeNumberMap[y[i]]] #viseme of the phoneme belonging to the y-number\n    \n    input = T.tensor4('inputs')\n    target = T.matrix('targets')\n    \n    cnn = load_model(model_npz_file)\n    test_output = lasagne.layers.get_output(cnn, deterministic=True)\n    test_loss = T.mean(T.sqr(T.maximum(0., 1. 
- target * test_output)))\n test_err = T.mean(T.neq(T.argmax(test_output, axis=1), T.argmax(target, axis=1)), dtype=theano.config.floatX)\n \n val_fn = theano.function([input, target], [test_loss, test_err])\n \n # calculate validation error of whole dataset\n err = 0\n loss = 0\n batches = len(X) / batch_size\n \n for i in range(batches):\n new_loss, new_err = val_fn(X[i * batch_size:(i + 1) * batch_size], y[i * batch_size:(i + 1) * batch_size])\n err += new_err\n loss += new_loss\n \n val_err = err / batches * 100\n val_loss /= batches\n\n print(\" validation loss: \" + str(val_loss))\n print(\" validation error rate: \" + str(val_err) + \"%\")\n print(\" test loss: \" + str(test_loss))\n print(\" test error rate: \" + str(test_err) + \"%\")" }, { "alpha_fraction": 0.5479009747505188, "alphanum_fraction": 0.5791173577308655, "avg_line_length": 27.90625, "blob_id": "0cf8a91f9453d9a5b8ab286fec00d20d92fb0cdd", "content_id": "1ce84e3ca7bbc8bd984bdd2b48edca8cb52e00cf", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 929, "license_type": "permissive", "max_line_length": 87, "num_lines": 32, "path": "/code/audioSR/Spoken-language-identification-master/majority_vote_ensembling.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "\"\"\" Usage: python majority_vote_ensembling.py csv1path csv2path ..\n\"\"\"\nimport sys\nimport numpy as np\n\nn_csv = len(sys.argv) - 1\ntrain_cnt = 12320\n\ncsv = []\nfor index in range(1, len(sys.argv)):\n csv.append(open(sys.argv[index], 'r'))\n \nensembled = open('top3_prediction_ensembled.csv', 'w')\n\nfor iter in range(train_cnt):\n cnt = [0 for i in range(176)]\n avg_prob = np.array([0.0 for i in range(176)])\n\n for index in range(n_csv):\n cur_prob = csv[index].readline().split(',')\n cur_prob = np.array([float(x) for x in cur_prob])\n \n avg_prob += cur_prob\n prediction = cur_prob.argmax()\n cnt[prediction] += 1\n\n\n mas = [(cnt[index], avg_prob[index], index) for index in range(176)]\n mas = sorted(mas, reverse=True)\n \n ensembled.write(str(mas[0][2]) + ',' + str(mas[1][2]) + ',' + str(mas[2][2]) + ',')\n ensembled.write(','.join([str(x) for x in cnt]) + '\\n')\n " }, { "alpha_fraction": 0.5639920234680176, "alphanum_fraction": 0.5780283212661743, "avg_line_length": 43.54679870605469, "blob_id": "eedbaa2fc18871f4dcd36683034977ca3111c05c", "content_id": "06604fe5ebf0daa8334b085f57ce4040e708cd98", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9048, "license_type": "permissive", "max_line_length": 134, "num_lines": 203, "path": "/code/audioSR/Spoken-language-identification-master/theano/networks/tc_net_rnn_shared_pad.py", "repo_name": "matthijsvk/convNets", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\n\nimport theano\nimport theano.tensor as T\n\nimport lasagne\nfrom lasagne import layers\nfrom lasagne.nonlinearities import rectify, softmax, sigmoid, tanh\n\nimport PIL.Image as Image\nfrom base_network import BaseNetwork\n\nfloatX = theano.config.floatX\n\n\nclass Network(BaseNetwork):\n \n def __init__(self, train_list_raw, test_list_raw, png_folder, batch_size, dropout, l2, mode, batch_norm, rnn_num_units, **kwargs):\n \n print \"==> not used params in DMN class:\", kwargs.keys()\n self.train_list_raw = train_list_raw\n self.test_list_raw = test_list_raw\n self.png_folder = png_folder\n self.batch_size = batch_size\n self.dropout = dropout\n self.l2 = l2\n self.mode 
= mode\n self.batch_norm = batch_norm\n self.num_units = rnn_num_units\n \n self.input_var = T.tensor4('input_var')\n self.answer_var = T.ivector('answer_var')\n \n print \"==> building network\"\n example = np.random.uniform(size=(self.batch_size, 1, 128, 858), low=0.0, high=1.0).astype(np.float32) #########\n answer = np.random.randint(low=0, high=176, size=(self.batch_size,)) #########\n \n network = layers.InputLayer(shape=(None, 1, 128, 858), input_var=self.input_var)\n print layers.get_output(network).eval({self.input_var:example}).shape\n \n # CONV-RELU-POOL 1\n network = layers.Conv2DLayer(incoming=network, num_filters=16, filter_size=(7, 7), \n stride=1, nonlinearity=rectify)\n print layers.get_output(network).eval({self.input_var:example}).shape\n network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)\n print layers.get_output(network).eval({self.input_var:example}).shape\n if (self.batch_norm):\n network = layers.BatchNormLayer(incoming=network)\n \n # CONV-RELU-POOL 2\n network = layers.Conv2DLayer(incoming=network, num_filters=32, filter_size=(5, 5), \n stride=1, nonlinearity=rectify)\n print layers.get_output(network).eval({self.input_var:example}).shape\n network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)\n print layers.get_output(network).eval({self.input_var:example}).shape\n if (self.batch_norm):\n network = layers.BatchNormLayer(incoming=network)\n\n \n # CONV-RELU-POOL 3\n network = layers.Conv2DLayer(incoming=network, num_filters=32, filter_size=(3, 3), \n stride=1, nonlinearity=rectify)\n print layers.get_output(network).eval({self.input_var:example}).shape\n network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)\n print layers.get_output(network).eval({self.input_var:example}).shape\n if (self.batch_norm):\n network = layers.BatchNormLayer(incoming=network)\n \n # CONV-RELU-POOL 4\n network = layers.Conv2DLayer(incoming=network, num_filters=32, filter_size=(3, 3), \n stride=1, nonlinearity=rectify)\n print layers.get_output(network).eval({self.input_var:example}).shape\n network = layers.MaxPool2DLayer(incoming=network, pool_size=(3, 3), stride=2, pad=2)\n print layers.get_output(network).eval({self.input_var:example}).shape\n if (self.batch_norm):\n network = layers.BatchNormLayer(incoming=network)\n \n self.params = layers.get_all_params(network, trainable=True)\n \n output = layers.get_output(network)\n num_channels = 32 \n filter_W = 54\n filter_H = 8\n \n # NOTE: these constants are shapes of last pool layer, it can be symbolic \n # explicit values are better for optimizations\n \n channels = []\n for channel_index in range(num_channels):\n channels.append(output[:, channel_index, :, :].transpose((0, 2, 1)))\n \n rnn_network_outputs = []\n W_in_to_updategate = None\n W_hid_to_updategate = None\n b_updategate = None\n W_in_to_resetgate = None\n W_hid_to_resetgate = None\n b_resetgate = None\n W_in_to_hidden_update = None\n W_hid_to_hidden_update = None\n b_hidden_update = None\n \n for channel_index in range(num_channels):\n rnn_input_var = channels[channel_index]\n \n # InputLayer \n network = layers.InputLayer(shape=(None, filter_W, filter_H), input_var=rnn_input_var)\n\n if (channel_index == 0):\n # GRULayer\n network = layers.GRULayer(incoming=network, num_units=self.num_units, only_return_final=True)\n W_in_to_updategate = network.W_in_to_updategate\n W_hid_to_updategate = network.W_hid_to_updategate\n b_updategate = network.b_updategate\n W_in_to_resetgate = 
network.W_in_to_resetgate\n W_hid_to_resetgate = network.W_hid_to_resetgate\n b_resetgate = network.b_resetgate\n W_in_to_hidden_update = network.W_in_to_hidden_update\n W_hid_to_hidden_update = network.W_hid_to_hidden_update\n b_hidden_update = network.b_hidden_update\n \n # add params \n self.params += layers.get_all_params(network, trainable=True)\n\n else:\n # GRULayer, but shared\n network = layers.GRULayer(incoming=network, num_units=self.num_units, only_return_final=True,\n resetgate=layers.Gate(W_in=W_in_to_resetgate, W_hid=W_hid_to_resetgate, b=b_resetgate),\n updategate=layers.Gate(W_in=W_in_to_updategate, W_hid=W_hid_to_updategate, b=b_updategate),\n hidden_update=layers.Gate(W_in=W_in_to_hidden_update, W_hid=W_hid_to_hidden_update, b=b_hidden_update))\n \n \n \n rnn_network_outputs.append(layers.get_output(network))\n \n all_output_var = T.concatenate(rnn_network_outputs, axis=1)\n print all_output_var.eval({self.input_var:example}).shape\n \n # InputLayer\n network = layers.InputLayer(shape=(None, self.num_units * num_channels), input_var=all_output_var)\n \n # Dropout Layer\n if (self.dropout > 0):\n network = layers.dropout(network, self.dropout)\n \n # BatchNormalization Layer\n if (self.batch_norm):\n network = layers.BatchNormLayer(incoming=network)\n \n # Last layer: classification\n network = layers.DenseLayer(incoming=network, num_units=176, nonlinearity=softmax)\n print layers.get_output(network).eval({self.input_var:example}).shape\n \n \n self.params += layers.get_all_params(network, trainable=True)\n self.prediction = layers.get_output(network)\n \n #print \"==> param shapes\", [x.eval().shape for x in self.params]\n \n self.loss_ce = lasagne.objectives.categorical_crossentropy(self.prediction, self.answer_var).mean()\n if (self.l2 > 0):\n self.loss_l2 = self.l2 * lasagne.regularization.apply_penalty(self.params, \n lasagne.regularization.l2)\n else:\n self.loss_l2 = 0\n self.loss = self.loss_ce + self.loss_l2\n \n #updates = lasagne.updates.adadelta(self.loss, self.params)\n updates = lasagne.updates.momentum(self.loss, self.params, learning_rate=0.003)\n \n if self.mode == 'train':\n print \"==> compiling train_fn\"\n self.train_fn = theano.function(inputs=[self.input_var, self.answer_var], \n outputs=[self.prediction, self.loss],\n updates=updates)\n \n print \"==> compiling test_fn\"\n self.test_fn = theano.function(inputs=[self.input_var, self.answer_var],\n outputs=[self.prediction, self.loss])\n \n \n def say_name(self):\n return \"tc_net_rnn.4conv.pad.GRU.shared.num_units%d.5khz\" % self.num_units\n \n \n def read_batch(self, data_raw, batch_index):\n\n start_index = batch_index * self.batch_size\n end_index = start_index + self.batch_size\n \n data = np.zeros((self.batch_size, 1, 128, 858), dtype=np.float32)\n answers = []\n \n for i in range(start_index, end_index):\n answers.append(int(data_raw[i].split(',')[1]))\n name = data_raw[i].split(',')[0]\n path = self.png_folder + name + \".png\"\n im = Image.open(path)\n data[i - start_index, 0, :, :] = np.array(im).astype(np.float32)[:128, :] / 256.0\n\n answers = np.array(answers, dtype=np.int32)\n return data, answers\n \n" } ]
54